From 7061365a56a1d3328631f48c81c34324adc16174 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 27 Mar 2026 15:55:18 +0100 Subject: [PATCH 01/67] XXX: un-CHAP Attempt to create a buildable version of what's released in cardano-node 10.7.0. --- cardano-diffusion/cardano-diffusion.cabal | 8 ++++---- ouroboros-network/ouroboros-network.cabal | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/cardano-diffusion/cardano-diffusion.cabal b/cardano-diffusion/cardano-diffusion.cabal index 831bae38e52..8f7a5381025 100644 --- a/cardano-diffusion/cardano-diffusion.cabal +++ b/cardano-diffusion/cardano-diffusion.cabal @@ -168,7 +168,7 @@ library contra-tracer, deepseq, dns, - io-classes:{io-classes, si-timers, strict-stm} ^>=1.8, + io-classes:{io-classes, si-timers, strict-stm} ^>=1.8 || ^>= 1.9, monoidal-synchronisation, network ^>=3.2.7, network-mux, @@ -297,7 +297,7 @@ library protocols bytestring >=0.10 && <0.13, cardano-diffusion:api, cborg >=0.2.1 && <0.3, - io-classes:{io-classes, si-timers} ^>=1.8, + io-classes:{io-classes, si-timers} ^>=1.8 || ^>= 1.9, ouroboros-network:{api, framework, protocols}, random, typed-protocols:{typed-protocols, cborg, stateful} ^>=1.2, @@ -535,7 +535,7 @@ library subscription containers >=0.5 && <0.9, contra-tracer >=0.1 && <0.3, deepseq, - io-classes:si-timers ^>=1.8.0.1, + io-classes:si-timers ^>=1.8.0.1 || ^>= 1.9, network-mux ^>=0.10.1.0, ouroboros-network:{api, framework}, @@ -558,7 +558,7 @@ library tracing base >=4.14 && <4.23, cardano-diffusion, text, - trace-dispatcher ^>=2.11.0, + trace-dispatcher ^>=2.11.0 || ^>=2.12.0, if flag(asserts) ghc-options: -fno-ignore-asserts diff --git a/ouroboros-network/ouroboros-network.cabal b/ouroboros-network/ouroboros-network.cabal index 95819b7ef30..54480e3653e 100644 --- a/ouroboros-network/ouroboros-network.cabal +++ b/ouroboros-network/ouroboros-network.cabal @@ -118,7 +118,7 @@ library api contra-tracer, deepseq, dns, - io-classes:{io-classes, 
si-timers, strict-stm} ^>=1.8, + io-classes:{io-classes, si-timers, strict-stm} ^>=1.8 || ^>= 1.9, iproute ^>=1.7.15, measures, network ^>=3.2.7, @@ -310,7 +310,7 @@ library dlist, dns, hashable, - io-classes:{io-classes, mtl, si-timers, strict-mvar, strict-stm} ^>=1.8, + io-classes:{io-classes, mtl, si-timers, strict-mvar, strict-stm} ^>=1.8 || ^>= 1.9, iproute, monoidal-synchronisation, mtl, @@ -383,7 +383,7 @@ library framework contra-tracer, deepseq, hashable, - io-classes:{io-classes, si-timers, strict-stm} ^>=1.8, + io-classes:{io-classes, si-timers, strict-stm} ^>=1.8 || ^>= 1.9, monoidal-synchronisation ^>=0.1.0.7, network ^>=3.2.7, network-mux ^>=0.10.1.0, @@ -424,7 +424,7 @@ library framework-tracing network-mux, ouroboros-network:{framework, orphan-instances}, text, - trace-dispatcher ^>=2.11.0, + trace-dispatcher ^>=2.11.0 || ^>=2.12.0, typed-protocols:{typed-protocols, stateful} ^>=1.2, library tests-lib @@ -473,8 +473,8 @@ library tests-lib contra-tracer, deepseq, deque ^>=0.4, - io-classes:{io-classes, si-timers, strict-stm} ^>=1.8, - io-sim, + io-classes:{io-classes, si-timers, strict-stm} ^>=1.8 || ^>= 1.9, + io-sim ^>=1.8 || ^>= 1.9, network-mux, pretty-simple, psqueues >=0.2.3 && <0.3, @@ -662,7 +662,7 @@ library tracing iproute, ouroboros-network:{ouroboros-network, orphan-instances, protocols}, text, - trace-dispatcher ^>=2.11.0, + trace-dispatcher ^>=2.11.0 || ^>=2.12.0, if flag(asserts) ghc-options: -fno-ignore-asserts @@ -816,7 +816,7 @@ library protocols constraints, containers, deepseq, - io-classes:{io-classes, si-timers} ^>=1.8, + io-classes:{io-classes, si-timers} ^>=1.8 || ^>= 1.9, nothunks, ouroboros-network:api, quiet, From dd1076cf62c2b1e6c07fcf8b00231249620943dd Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Tue, 14 Apr 2026 08:59:30 +0200 Subject: [PATCH 02/67] tx-test: fix test case counting of valid and invalid txs In case of duplicate txids, a second invalid tx could be counted as valid because it was tracked by txid 
alongside the valid duplicate, conflating the two. --- ouroboros-network/ouroboros-network.cabal | 1 + .../Test/Ouroboros/Network/TxSubmission.hs | 2 + .../Network/TxSubmission/MempoolWriter.hs | 46 ++++++++++ .../Ouroboros/Network/TxSubmission/Types.hs | 85 ++++++++++++++++--- 4 files changed, 122 insertions(+), 12 deletions(-) create mode 100644 ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/MempoolWriter.hs diff --git a/ouroboros-network/ouroboros-network.cabal b/ouroboros-network/ouroboros-network.cabal index 54480e3653e..38e45f56e93 100644 --- a/ouroboros-network/ouroboros-network.cabal +++ b/ouroboros-network/ouroboros-network.cabal @@ -972,6 +972,7 @@ library ouroboros-network-tests-lib Test.Ouroboros.Network.TxSubmission.AppV1 Test.Ouroboros.Network.TxSubmission.AppV2 Test.Ouroboros.Network.TxSubmission.Mempool.Simple + Test.Ouroboros.Network.TxSubmission.MempoolWriter Test.Ouroboros.Network.TxSubmission.TxLogic Test.Ouroboros.Network.TxSubmission.Types diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission.hs index 7ea9de6269f..cbfe4db4bc4 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission.hs @@ -2,6 +2,7 @@ module Test.Ouroboros.Network.TxSubmission (tests) where import Test.Ouroboros.Network.TxSubmission.AppV1 qualified as AppV1 import Test.Ouroboros.Network.TxSubmission.AppV2 qualified as AppV2 +import Test.Ouroboros.Network.TxSubmission.MempoolWriter qualified as MempoolWriter import Test.Ouroboros.Network.TxSubmission.TxLogic qualified as TxLogic import Test.Tasty (TestTree, testGroup) @@ -9,6 +10,7 @@ import Test.Tasty (TestTree, testGroup) tests :: TestTree tests = testGroup "Ouroboros.Network.TxSubmission" [ AppV1.tests + , MempoolWriter.tests , TxLogic.tests , AppV2.tests ] diff --git 
a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/MempoolWriter.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/MempoolWriter.hs new file mode 100644 index 00000000000..4915b8fd9c9 --- /dev/null +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/MempoolWriter.hs @@ -0,0 +1,46 @@ +module Test.Ouroboros.Network.TxSubmission.MempoolWriter (tests) where + +import Control.Concurrent.Class.MonadSTM (newTVarIO, readTVarIO) +import Control.Monad.IOSim (runSimOrThrow) + +import Ouroboros.Network.Protocol.TxSubmission2.Type (SizeInBytes (..)) +import Ouroboros.Network.TxSubmission.Inbound.V1 + (TxSubmissionMempoolWriter (..)) + +import Test.Ouroboros.Network.TxSubmission.Types + +import Test.Tasty (TestTree, testGroup) +import Test.Tasty.HUnit (testCaseSteps, (@?=)) + + +tests :: TestTree +tests = testGroup "MempoolWriter" + [ testCaseSteps "getMempoolWriter records only valid duplicates" unit_getMempoolWriter_recordsOnlyValidDuplicates + ] + + +unit_getMempoolWriter_recordsOnlyValidDuplicates :: (String -> IO ()) -> IO () +unit_getMempoolWriter_recordsOnlyValidDuplicates step = do + step "Populate the inbound mempool with one valid tx and submit one invalid duplicate plus one valid duplicate" + let (accepted, rejected, duplicateTxIds) = + runSimOrThrow $ do + duplicateVar <- newTVarIO [] + mempool <- newMempool [mkTx 17 True] + let writer = getMempoolWriter duplicateVar mempool + result <- mempoolAddTxs writer [mkTx 17 False, mkTx 17 True] + duplicates <- readTVarIO duplicateVar + pure (fst result, snd result, duplicates) + + step "Assert both submissions are rejected as duplicates but only the valid duplicate is recorded for result accounting" + accepted @?= [] + rejected @?= [(17, DuplicateTx), (17, DuplicateTx)] + duplicateTxIds @?= [17] + where + mkTx :: TxId -> Bool -> Tx TxId + mkTx txid isValid = + Tx { + getTxId = txid, + getTxSize = SizeInBytes 1, + getTxAdvSize = SizeInBytes 1, + getTxValid = isValid + } diff 
--git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Types.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Types.hs index 3692779c74f..ace9ed5060a 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Types.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Types.hs @@ -17,6 +17,7 @@ module Test.Ouroboros.Network.TxSubmission.Types , readMempool , getMempoolReader , getMempoolWriter + , InvalidTx (..) , maxTxSize , LargeNonEmptyList (..) , SimResults (..) @@ -31,6 +32,7 @@ import Prelude hiding (seq) import NoThunks.Class import Control.Concurrent.Class.MonadSTM +import Control.Concurrent.Class.MonadSTM.Strict qualified as StrictSTM import Control.DeepSeq import Control.Exception (SomeException (..)) import Control.Monad.Class.MonadAsync @@ -48,6 +50,10 @@ import Codec.CBOR.Encoding qualified as CBOR import Codec.CBOR.Read qualified as CBOR import Data.ByteString.Lazy (ByteString) +import Data.Either (partitionEithers) +import Data.List qualified as List +import Data.Sequence qualified as Seq +import Data.Set qualified as Set import Data.Typeable (Typeable) import GHC.Generics (Generic) @@ -137,18 +143,73 @@ getMempoolWriter :: forall txid m. => TVar m [txid] -> Mempool m txid (Tx txid) -> TxSubmissionMempoolWriter txid (Tx txid) Integer m InvalidTx -getMempoolWriter duplicateVar = - Mempool.getWriter DuplicateTx - getTxId - (\_ txs -> return - [ if getTxValid tx - then Right tx - else Left (getTxId tx, InvalidTx) - | tx <- txs - ] - ) - (\t -> atomically $ modifyTVar' duplicateVar - (map fst (filter ((== DuplicateTx) . 
snd) t) <>)) +getMempoolWriter duplicateVar (Mempool.Mempool mempoolVar) = + TxSubmissionMempoolWriter { + txId = getTxId, + mempoolAddTxs = \txs -> do + (acceptedTxs, rejectedTxs, duplicateValidTxIds) <- atomically $ do + Mempool.MempoolSeq { Mempool.mempoolSet, Mempool.mempoolSeq, Mempool.nextIdx } <- + StrictSTM.readTVar mempoolVar + + let (duplicateTxs, txsToValidate) = + List.partition (\tx -> getTxId tx `Set.member` mempoolSet) txs + duplicateRejectedTxs = + [ (getTxId tx, DuplicateTx) + | tx <- duplicateTxs + ] + duplicateValidTxIds = + [ getTxId tx + | tx <- duplicateTxs + , getTxValid tx + ] + (invalidRejectedTxs, validTxs) = + partitionEithers + [ if getTxValid tx + then Right tx + else Left (getTxId tx, InvalidTx) + | tx <- txsToValidate + ] + + (delta, mempoolSeq', nextIdx', acceptedTxs, duplicateValidTxIds') = + List.foldl' + (\(set, seq, idx, accepted, duplicates) tx -> + let txid = getTxId tx in + if txid `Set.member` set + then ( set + , seq + , idx + , accepted + , txid : duplicates + ) + else ( Set.insert txid set + , seq Seq.|> Mempool.WithIndex idx tx + , succ idx + , txid : accepted + , duplicates + ) + ) + (Set.empty, mempoolSeq, nextIdx, [], []) + validTxs + + StrictSTM.writeTVar + mempoolVar + Mempool.MempoolSeq { + Mempool.mempoolSet = mempoolSet `Set.union` delta, + Mempool.mempoolSeq = mempoolSeq', + Mempool.nextIdx = nextIdx' + } + + pure + ( acceptedTxs + , invalidRejectedTxs + ++ duplicateRejectedTxs + ++ [ (txid, DuplicateTx) | txid <- duplicateValidTxIds' ] + , duplicateValidTxIds ++ duplicateValidTxIds' + ) + + atomically $ modifyTVar' duplicateVar (duplicateValidTxIds <>) + pure (acceptedTxs, rejectedTxs) + } txSubmissionCodec2 :: MonadST m From 3c0b9f457a24c2e5bfa3810c49ef997b7fbfc5ab Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Thu, 23 Apr 2026 14:58:50 +0200 Subject: [PATCH 03/67] cardano-diffusion test: prop_txSubmission_chainIntegrity Add a test for ensuring that a peer can download all valid TXs when faced with two 
peers with conflicting tx order. --- cardano-diffusion/cardano-diffusion.cabal | 1 + .../Test/Cardano/Network/Diffusion/Testnet.hs | 218 ++++++++++++++- .../Network/Diffusion/Testnet/ChainedTxs.hs | 259 ++++++++++++++++++ .../Network/TxSubmission/MempoolWriter.hs | 3 +- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 3 +- .../Ouroboros/Network/TxSubmission/Types.hs | 65 ++++- 6 files changed, 532 insertions(+), 17 deletions(-) create mode 100644 cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/ChainedTxs.hs diff --git a/cardano-diffusion/cardano-diffusion.cabal b/cardano-diffusion/cardano-diffusion.cabal index 8f7a5381025..b8f586a3bd9 100644 --- a/cardano-diffusion/cardano-diffusion.cabal +++ b/cardano-diffusion/cardano-diffusion.cabal @@ -487,6 +487,7 @@ library cardano-diffusion-tests-lib exposed-modules: Test.Cardano.Network.Diffusion.Policies Test.Cardano.Network.Diffusion.Testnet + Test.Cardano.Network.Diffusion.Testnet.ChainedTxs Test.Cardano.Network.Diffusion.Testnet.MiniProtocols Test.Cardano.Network.Diffusion.Testnet.Simulation Test.Cardano.Network.OrphanInstances.Tests diff --git a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs index 4a89605a9a0..e73e0a8cdba 100644 --- a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs +++ b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs @@ -97,6 +97,8 @@ import Ouroboros.Network.TxSubmission.Outbound (TxSubmissionProtocolError (..)) import Simulation.Network.Snocket (BearerInfo (..), noAttenuation) +import Test.Cardano.Network.Diffusion.Testnet.ChainedTxs (ChainedPeerTxs (..)) +import Test.Cardano.Network.Diffusion.Testnet.ChainedTxs qualified as ChainedTxs import Test.Cardano.Network.Diffusion.Testnet.Simulation import Test.Ouroboros.Network.ConnectionManager.Timeouts @@ -179,6 +181,8 @@ tests = , testGroup "Tx Submission" [ nightlyTest $ testProperty "no protocol 
errors" prop_no_txSubmission_error_iosimpor + , nightlyTest $ testProperty "tx chain integrity" + prop_txSubmission_chainIntegrity_iosimpor ] , testGroup "Churn" [ nightlyTest $ testProperty "no timeouts" @@ -265,12 +269,15 @@ tests = prop_no_peershare_unwilling_iosim ] , testGroup "Tx Submission" - [ testProperty "no protocol errors" + [ ChainedTxs.tests + , testProperty "no protocol errors" prop_no_txSubmission_error_iosim , testProperty "all transactions" prop_txSubmission_allTransactions , testProperty "inflight coverage" prop_check_inflight_ratio + , testProperty "tx chain integrity" + prop_txSubmission_chainIntegrity_iosim ] , testGroup "Churn" [ testProperty "no timeouts" prop_churn_notimeouts_iosim @@ -1076,6 +1083,215 @@ prop_txSubmission_allTransactions (ArbTxDecisionPolicy decisionPolicy) | otherwise -> counterexample "Didn't found any entry in the map!" False +-- | Three-node diffusion topology used by the Tx Chain Integrity +-- property: one node (0.0.0.0) downloading txs from two downstream +-- peers (0.0.0.1 and 0.0.0.2). The node has no outbound txs of its own; +-- each downstream peer advertises the tx list it was assigned by +-- 'ChainedPeerTxs'. 
+txChainIntegrityDiffScript :: ArbTxDecisionPolicy + -> ChainedPeerTxs + -> DiffusionScript +txChainIntegrityDiffScript (ArbTxDecisionPolicy decisionPolicy) + (ChainedPeerTxs chainedTxsB chainedTxsC) = + let localRootConfig = LocalRootConfig + DoNotAdvertisePeer + InitiatorAndResponderDiffusionMode + Outbound + IsNotTrustable + + noPeerTargets = PeerSelectionTargets { + targetNumberOfRootPeers = 0, + targetNumberOfKnownPeers = 0, + targetNumberOfEstablishedPeers = 0, + targetNumberOfActivePeers = 0, + targetNumberOfKnownBigLedgerPeers = 0, + targetNumberOfEstablishedBigLedgerPeers = 0, + targetNumberOfActiveBigLedgerPeers = 0 + } + + upstreamTargets = PeerSelectionTargets { + targetNumberOfRootPeers = 1, + targetNumberOfKnownPeers = 1, + targetNumberOfEstablishedPeers = 1, + targetNumberOfActivePeers = 1, + targetNumberOfKnownBigLedgerPeers = 0, + targetNumberOfEstablishedBigLedgerPeers = 0, + targetNumberOfActiveBigLedgerPeers = 0 + } + + in DiffusionScript + (SimArgs 1 10 decisionPolicy) + (singletonTimedScript Map.empty) + [ ( NodeArgs + (-1) + InitiatorAndResponderDiffusionMode + Map.empty + PraosMode + (Script (DontUseBootstrapPeers :| [])) + (TestAddress (IPAddr (read "0.0.0.0") 0)) + PeerSharingDisabled + [] + (Script (LedgerPools [] :| [])) + (noPeerTargets, noPeerTargets) + (Script (DNSTimeout {getDNSTimeout = 10} :| [])) + (Script (DNSLookupDelay {getDNSLookupDelay = 0} :| [])) + Nothing + False + (Script (PraosFetchMode FetchModeDeadline :| [])) + [] + , [JoinNetwork 0] ) + , ( NodeArgs + (-2) + InitiatorAndResponderDiffusionMode + Map.empty + PraosMode + (Script (DontUseBootstrapPeers :| [])) + (TestAddress (IPAddr (read "0.0.0.1") 0)) + PeerSharingDisabled + [(1, 1, Map.fromList + [(RelayAccessAddress "0.0.0.0" 0, localRootConfig)])] + (Script (LedgerPools [] :| [])) + (upstreamTargets, upstreamTargets) + (Script (DNSTimeout {getDNSTimeout = 10} :| [])) + (Script (DNSLookupDelay {getDNSLookupDelay = 0} :| [])) + Nothing + False + (Script 
(PraosFetchMode FetchModeDeadline :| [])) + chainedTxsB + , [JoinNetwork 0] ) + , ( NodeArgs + (-3) + InitiatorAndResponderDiffusionMode + Map.empty + PraosMode + (Script (DontUseBootstrapPeers :| [])) + (TestAddress (IPAddr (read "0.0.0.2") 0)) + PeerSharingDisabled + [(1, 1, Map.fromList + [(RelayAccessAddress "0.0.0.0" 0, localRootConfig)])] + (Script (LedgerPools [] :| [])) + (upstreamTargets, upstreamTargets) + (Script (DNSTimeout {getDNSTimeout = 10} :| [])) + (Script (DNSLookupDelay {getDNSLookupDelay = 0} :| [])) + Nothing + False + (Script (PraosFetchMode FetchModeDeadline :| [])) + chainedTxsC + , [JoinNetwork 0] ) + ] + +-- | Txs guaranteed to reach the node's mempool: at least one downstream +-- peer carries the tx together with all of its ancestors, and every +-- member of that chain is valid. Txs whose chain isn't fully represented +-- on either downstream peer are legitimately unreachable and correctly +-- excluded from this lower bound. V2 may deliver additional txs when a +-- split chain is assembled across downstream peers via favourable +-- ordering, but those are not guaranteed. +txChainIntegrityExpected :: ChainedPeerTxs -> Set TxId +txChainIntegrityExpected (ChainedPeerTxs chainedTxsB chainedTxsC) = + Set.union + (perPeerDeliverable chainedTxsB) + (perPeerDeliverable chainedTxsC) + where + perPeerDeliverable :: [Tx TxId] -> Set TxId + perPeerDeliverable peerTxs = + let txMap = Map.fromList [(getTxId t, t) | t <- peerTxs] + complete tid = case Map.lookup tid txMap of + Nothing -> False + Just t -> getTxValid t + && maybe True complete (getTxParent t) in + Set.fromList [ getTxId t | t <- peerTxs, complete (getTxId t) ] + +checkTxChainIntegrity :: forall r. 
+ ChainedPeerTxs + -> Set TxId + -> SimTrace r + -> Int + -> Property +checkTxChainIntegrity (ChainedPeerTxs chainedTxsB chainedTxsC) + expectedAtReceiver + ioSimTrace + traceNumber = + let trace = Trace.take traceNumber ioSimTrace + + events = fmap (\(WithTime t (WithName name b)) -> + WithName name (WithTime t b)) + . withTimeNameTraceEvents + @DiffusionTestTrace + @NtNAddr + $ trace + + sortedAcceptedTxidsMap :: Map NtNAddr [TxId] + sortedAcceptedTxidsMap = + foldr (\l r -> + List.foldl' (\rr (WithName n (WithTime _ x)) -> + case x of + DiffusionTxSubmissionInbound (TraceTxInboundAddedToMempool txids _) -> + Map.alter (maybe (Just txids) (Just . sort . (txids ++))) n rr + _ -> rr) r l + ) Map.empty + . Trace.toList + . splitWithNameTrace + $ events + + receiverAddr = TestAddress (IPAddr (read "0.0.0.0") 0) + accepted = Map.lookup receiverAddr sortedAcceptedTxidsMap + actualSet = maybe Set.empty Set.fromList accepted + missing = expectedAtReceiver `Set.difference` actualSet in + + counterexample ("accepted txids map: " ++ show sortedAcceptedTxidsMap) + $ counterexample ("downstream peer B outbound: " ++ show chainedTxsB) + $ counterexample ("downstream peer C outbound: " ++ show chainedTxsC) + $ counterexample ("expected (reliably deliverable): " + ++ show (Set.toList expectedAtReceiver)) + $ counterexample ("missing from node: " ++ show (Set.toList missing)) + $ label ("expected count: " + ++ renderRanges 5 (Set.size expectedAtReceiver)) + $ label ("accepted count: " + ++ renderRanges 5 (Set.size actualSet)) + $ counterexample "node (0.0.0.0)" + $ property (Set.null missing) + +-- | Tx Chain Integrity property: the node's mempool accumulates every +-- transaction reachable via a complete, valid ancestor chain on at least +-- one of its downstream peers. 
+-- +-- This exercises V2's cross-peer retry path: if an adversarial downstream +-- peer delivers a child out-of-order and the mempool rejects with +-- 'MissingParent', the tx must still reach the node via the well-behaved +-- downstream peer's re-advertisement. +prop_txSubmission_chainIntegrity :: ArbTxDecisionPolicy + -> ChainedPeerTxs + -> Property +prop_txSubmission_chainIntegrity argPolicy chainedTxs = + let diffScript = txChainIntegrityDiffScript argPolicy chainedTxs + expected = txChainIntegrityExpected chainedTxs in + checkTxChainIntegrity + chainedTxs + expected + (runSimTrace (diffusionSimulation noAttenuation diffScript)) + long_trace + +prop_txSubmission_chainIntegrity_iosimpor :: ArbTxDecisionPolicy + -> ChainedPeerTxs + -> Property +prop_txSubmission_chainIntegrity_iosimpor argPolicy chainedTxs = + let diffScript = txChainIntegrityDiffScript argPolicy chainedTxs + expected = txChainIntegrityExpected chainedTxs + sim :: forall s. IOSim s Void + sim = do + exploreRaces + diffusionSimulation noAttenuation diffScript + in labelDiffusionScript diffScript + $ exploreSimTrace (\a -> a { explorationScheduleBound = 10 }) sim $ \_ trace -> + checkTxChainIntegrity chainedTxs expected trace long_trace + +prop_txSubmission_chainIntegrity_iosim :: ArbTxDecisionPolicy + -> ChainedPeerTxs + -> Property +prop_txSubmission_chainIntegrity_iosim = prop_txSubmission_chainIntegrity + + -- | This test checks the ratio of the inflight txs against the allowed by the -- TxDecisionPolicy. -- diff --git a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/ChainedTxs.hs b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/ChainedTxs.hs new file mode 100644 index 00000000000..1cdedf129f8 --- /dev/null +++ b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/ChainedTxs.hs @@ -0,0 +1,259 @@ +module Test.Cardano.Network.Diffusion.Testnet.ChainedTxs + ( ChainedPeerTxs (..) 
+ , tests + ) where + +import Data.Function (on) +import Data.List as List (foldl', nub, nubBy) +import Data.Map.Strict (Map) +import Data.Map.Strict qualified as Map +import Data.Set (Set) +import Data.Set qualified as Set + +import Test.Ouroboros.Network.TxSubmission.Types (Tx (..), TxId) + +import Test.QuickCheck +import Test.Tasty (TestTree, testGroup) +import Test.Tasty.QuickCheck (testProperty) + + +-- | A randomly generated transaction forest distributed across two peers, +-- used by chain-integrity tests at the diffusion layer. +-- +-- Each tx may carry a parent pointer ('getTxParent') to any earlier tx in +-- the forest, with some probability of having no parent at all. Invalidity +-- propagates down the chain at generation time: a descendant of an invalid +-- tx is itself generated as invalid, matching mainnet semantics where a tx +-- that consumes an invalid parent's output is itself invalid by construction. +-- +-- Peer assignment reflects realistic mainnet conditions: +-- +-- * Each peer carries a random subset of the forest (peers may lack txs +-- that, for example, were already included in an adopted block). +-- * Well-behaved peers (the common case) advertise their subset in +-- chain-topological order: parents before children. +-- * Adversarial or buggy peers (the occasional case) advertise in a +-- shuffled order to stress V2's handling of out-of-order streams. +-- * Every tx is carried by at least one /well-behaved/ peer so the full +-- forest is reachable via the reliable path, even when adversarial +-- peers misorder or mishandle their share. 
+data ChainedPeerTxs = ChainedPeerTxs { + chainedTxsA :: [Tx TxId] + , chainedTxsB :: [Tx TxId] + } + deriving Show + +instance Arbitrary ChainedPeerTxs where + arbitrary = do + chainLen <- choose (3, 15) + chain <- genChainedTxs chainLen + perPeer <- distributeAcrossPeers 2 chain + case perPeer of + [txsA, txsB] -> return (ChainedPeerTxs txsA txsB) + _ -> error "ChainedPeerTxs: distributeAcrossPeers invariant broken" + + shrink (ChainedPeerTxs txsA txsB) = + [ ChainedPeerTxs (dropDoomed txsA) (dropDoomed txsB) + | tid <- nub (map getTxId (txsA ++ txsB)) + , let doomed = transitiveDescendants tid (txsA ++ txsB) + dropDoomed = filter (\t -> getTxId t `Set.notMember` doomed) + ] + where + -- | All txids reachable as descendants of @root@, including @root@ + -- itself. Used so that dropping a parent also drops every + -- transitive child, avoiding dangling parent pointers in the + -- shrunken value. + transitiveDescendants :: TxId -> [Tx TxId] -> Set TxId + transitiveDescendants root txs = + let children p = [ getTxId t | t <- txs, getTxParent t == Just p ] + go acc p + | p `Set.member` acc = acc + | otherwise = List.foldl' go (Set.insert p acc) (children p) in + go Set.empty root + + +-- | Generate a forest of @n@ transactions. +-- +-- Tx 0 has no parent. Each subsequent tx picks its parent uniformly from +-- @{Nothing}@ unioned with the ids of earlier txs, weighted so a parent +-- link is more common than no parent. Invalidity is propagated: if the +-- parent is invalid, the tx is also invalid regardless of its own drawn +-- validity. +genChainedTxs :: Int -> Gen [Tx TxId] +genChainedTxs n = go 0 Map.empty [] + where + baseId :: TxId + baseId = 1 + + go :: Int -> Map TxId Bool -> [Tx TxId] -> Gen [Tx TxId] + go i validMap acc + | i >= n = return (reverse acc) + | otherwise = do + let txid = baseId + i + parent <- if i == 0 + then return Nothing + else frequency + [ (1, return Nothing) + , (3, Just . 
(baseId +) <$> choose (0, i - 1)) + ] + ownValid <- frequency [ (3, return True), (1, return False) ] + size <- chooseEnum (0, 1024) + let parentValid = maybe True (validMap Map.!) parent + effectiveValid = ownValid && parentValid + tx = Tx + { getTxId = txid + , getTxSize = size + , getTxAdvSize = size + , getTxValid = effectiveValid + , getTxParent = parent + } + go (i + 1) (Map.insert txid effectiveValid validMap) (tx : acc) + + +-- | Per-peer behaviour selected up front so chain-coverage constraints +-- can be satisfied against the well-behaved subset before any lists are +-- emitted. +data PeerBehaviour = WellBehaved | Adversarial + deriving (Eq, Show) + +-- | Distribute a forest across @peerCount@ peers. +-- +-- Each peer is classified as well-behaved (advertises in chain-topological +-- order, the common case) or adversarial (advertises in a shuffled order, +-- the occasional case). To keep the property invariant clean, every tx +-- is guaranteed to be carried by at least one /well-behaved/ peer: +-- adversarial peers add noise and races but cannot singly strand a tx. At +-- least one peer is always well-behaved. +-- +-- Adversarial peers may duplicate txs already carried by well-behaved +-- peers; this exercises V2's cross-peer retry path when an adversarial +-- peer's out-of-order delivery causes a rejection. +distributeAcrossPeers :: Int -> [Tx TxId] -> Gen [[Tx TxId]] +distributeAcrossPeers peerCount chain = do + behaviours <- ensureSomeWellBehaved <$> + vectorOf peerCount + (frequency [ (4, pure WellBehaved) + , (1, pure Adversarial) ]) + + subsets <- vectorOf peerCount (sublistOf chain) + + let wellBehavedIxs = + [ i | (i, WellBehaved) <- zip [0..] behaviours ] + wellBehavedCov = + Set.unions + [ Set.fromList (map getTxId (subsets !! 
i)) + | i <- wellBehavedIxs ] + uncovered = + [ t | t <- chain, getTxId t `Set.notMember` wellBehavedCov ] + additions <- traverse + (\t -> do i <- elements wellBehavedIxs; pure (i, t)) + uncovered + + let subsetsWithCoverage :: [[Tx TxId]] + subsetsWithCoverage = + zipWith + (\i base -> base ++ [ t | (p, t) <- additions, p == i ]) + [0 .. peerCount - 1] + subsets + + inChainOrder :: Set TxId -> [Tx TxId] + inChainOrder peerSet = + filter (\t -> getTxId t `Set.member` peerSet) chain + + traverse + (\(b, s) -> case b of + WellBehaved -> pure (inChainOrder (Set.fromList (map getTxId s))) + Adversarial -> shuffle s) + (zip behaviours subsetsWithCoverage) + where + ensureSomeWellBehaved bs + | WellBehaved `elem` bs = bs + | otherwise = WellBehaved : drop 1 bs + + +-- +-- Meta-tests: verify generator and shrinker invariants. +-- + +-- | Every parent pointer in the combined peer lists resolves to a tx that +-- also appears somewhere in the union. No dangling parents. +prop_parentsResolvable :: ChainedPeerTxs -> Bool +prop_parentsResolvable (ChainedPeerTxs txsA txsB) = + let ids = Set.fromList (map getTxId (txsA ++ txsB)) + parents = [ p | t <- txsA ++ txsB, Just p <- [getTxParent t] ] in + all (`Set.member` ids) parents + +-- | Invalidity propagates along the dependency chain: if a tx's parent is +-- present in the union and invalid, the tx itself must be invalid too. +prop_invalidityPropagates :: ChainedPeerTxs -> Bool +prop_invalidityPropagates (ChainedPeerTxs txsA txsB) = + let allTxs = nubBy ((==) `on` getTxId) (txsA ++ txsB) + txMap = Map.fromList [(getTxId t, t) | t <- allTxs] in + all (\t -> case getTxParent t of + Nothing -> True + Just p -> case Map.lookup p txMap of + Nothing -> True + Just pt -> getTxValid pt || not (getTxValid t)) + -- not (getTxValid pt) => not (getTxValid t) + allTxs + +-- | At least one peer's tx list is in chain-topological order (parents +-- before children), and the union of all such peers' txids covers every +-- tx in the value. 
This is the external face of the "every tx is carried +-- by at least one well-behaved peer" guarantee from +-- 'distributeAcrossPeers': adversarial peers may exist and misorder, but +-- the full forest is always reachable via the chain-ordered subset. +prop_wellBehavedCoverage :: ChainedPeerTxs -> Property +prop_wellBehavedCoverage (ChainedPeerTxs txsA txsB) = + let allIds = Set.fromList (map getTxId (txsA ++ txsB)) + chainOrdered = filter isChainOrdered [txsA, txsB] + coverage = Set.unions + [ Set.fromList (map getTxId txs) | txs <- chainOrdered ] in + counterexample ("peer A: " ++ show txsA) + $ counterexample ("peer B: " ++ show txsB) + $ counterexample ("chain-ordered peers: " ++ show (length chainOrdered)) + $ counterexample ("coverage: " ++ show (Set.toList coverage)) + $ counterexample ("all ids: " ++ show (Set.toList allIds)) + $ property (not (null chainOrdered) && coverage == allIds) + where + isChainOrdered :: [Tx TxId] -> Bool + isChainOrdered txs = + let positions = Map.fromList (zip (map getTxId txs) [(0 :: Int) ..]) in + all (\t -> case getTxParent t of + Nothing -> True + Just p -> case Map.lookup p positions of + Nothing -> True + Just ppos -> ppos < positions Map.! getTxId t) + txs + + +-- | Every shrink of a value satisfies the same structural invariants as +-- a freshly generated one. +prop_shrinkPreservesInvariants :: ChainedPeerTxs -> Property +prop_shrinkPreservesInvariants cpt = + conjoin + [ counterexample ("shrunk: " ++ show s) $ + prop_parentsResolvable s + .&&. prop_invalidityPropagates s + .&&. prop_wellBehavedCoverage s + | s <- shrink cpt + ] + +-- | Each shrink strictly reduces the total tx count so shrinking converges. 
+prop_shrinkMakesProgress :: ChainedPeerTxs -> Property +prop_shrinkMakesProgress cpt = + let size (ChainedPeerTxs a b) = length a + length b + origSize = size cpt in + conjoin + [ counterexample ("shrunk: " ++ show s) (size s < origSize) + | s <- shrink cpt + ] + +tests :: TestTree +tests = testGroup "ChainedTxs" + [ testProperty "parents resolvable" prop_parentsResolvable + , testProperty "invalidity propagates" prop_invalidityPropagates + , testProperty "well-behaved coverage" prop_wellBehavedCoverage + , testProperty "shrink preserves invariants" prop_shrinkPreservesInvariants + , testProperty "shrink makes progress" prop_shrinkMakesProgress + ] diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/MempoolWriter.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/MempoolWriter.hs index 4915b8fd9c9..2dc5a722851 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/MempoolWriter.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/MempoolWriter.hs @@ -42,5 +42,6 @@ unit_getMempoolWriter_recordsOnlyValidDuplicates step = do getTxId = txid, getTxSize = SizeInBytes 1, getTxAdvSize = SizeInBytes 1, - getTxValid = isValid + getTxValid = isValid, + getTxParent = Nothing } diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 79041ec3bad..ff1e58eb561 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -835,7 +835,8 @@ instance Arbitrary ArbCollectTxs where getTxSize = size, -- `availableTxIds` contains advertised sizes getTxAdvSize = availableTxIds Map.! 
txid, - getTxValid = valid }) + getTxValid = valid, + getTxParent = Nothing }) pure $ assert (foldMap getTxAdvSize receivedTx <= requestedTxsInflightSize) $ ArbCollectTxs mempoolHasTxFun diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Types.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Types.hs index ace9ed5060a..c53bba29b47 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Types.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Types.hs @@ -78,7 +78,12 @@ data Tx txid = Tx { -- | If false this means that when this tx will be submitted to a remote -- mempool it will not be valid. The outbound mempool might contain -- invalid tx's in this sense. - getTxValid :: !Bool + getTxValid :: !Bool, + -- | Optional parent dependency: this tx may only be accepted into a + -- mempool once its parent is already present (either from an earlier + -- batch or earlier in the same batch). 'Nothing' means no dependency. + -- Used by chain-aware tests to model transaction chains. + getTxParent :: !(Maybe txid) } deriving (Eq, Ord, Show, Generic, NFData) @@ -101,6 +106,9 @@ instance Arbitrary txid => Arbitrary (Tx txid) where <*> frequency [ (3, pure True) , (1, pure False) ] + <*> pure Nothing + -- ^ Generic Arbitrary produces standalone txs with no parent. + -- Chain-aware generators construct parents explicitly. -- maximal tx size maxTxSize :: SizeInBytes @@ -128,7 +136,7 @@ getMempoolReader :: forall txid m. getMempoolReader = Mempool.getReader getTxId getTxAdvSize -data InvalidTx = InvalidTx | DuplicateTx +data InvalidTx = InvalidTx | DuplicateTx | MissingParent deriving (Eq, Show) getMempoolWriter :: forall txid m. 
@@ -170,9 +178,9 @@ getMempoolWriter duplicateVar (Mempool.Mempool mempoolVar) = | tx <- txsToValidate ] - (delta, mempoolSeq', nextIdx', acceptedTxs, duplicateValidTxIds') = + (delta, mempoolSeq', nextIdx', acceptedTxs, duplicateValidTxIds', missingParentIds) = List.foldl' - (\(set, seq, idx, accepted, duplicates) tx -> + (\(set, seq, idx, accepted, duplicates, missing) tx -> let txid = getTxId tx in if txid `Set.member` set then ( set @@ -180,15 +188,31 @@ getMempoolWriter duplicateVar (Mempool.Mempool mempoolVar) = , idx , accepted , txid : duplicates + , missing ) - else ( Set.insert txid set - , seq Seq.|> Mempool.WithIndex idx tx - , succ idx - , txid : accepted - , duplicates - ) + else case getTxParent tx of + Just p + | not (p `Set.member` set) + && not (p `Set.member` mempoolSet) -> + -- Parent is not in the mempool and has not + -- been accepted earlier in this batch. + ( set + , seq + , idx + , accepted + , duplicates + , txid : missing + ) + _ -> + ( Set.insert txid set + , seq Seq.|> Mempool.WithIndex idx tx + , succ idx + , txid : accepted + , duplicates + , missing + ) ) - (Set.empty, mempoolSeq, nextIdx, [], []) + (Set.empty, mempoolSeq, nextIdx, [], [], []) validTxs StrictSTM.writeTVar @@ -203,7 +227,8 @@ getMempoolWriter duplicateVar (Mempool.Mempool mempoolVar) = ( acceptedTxs , invalidRejectedTxs ++ duplicateRejectedTxs - ++ [ (txid, DuplicateTx) | txid <- duplicateValidTxIds' ] + ++ [ (txid, DuplicateTx) | txid <- duplicateValidTxIds' ] + ++ [ (txid, MissingParent) | txid <- missingParentIds ] , duplicateValidTxIds ++ duplicateValidTxIds' ) @@ -219,12 +244,13 @@ txSubmissionCodec2 = codecTxSubmission2 CBOR.encodeInt CBOR.decodeInt encodeTx decodeTx where - encodeTx Tx {getTxId, getTxSize, getTxAdvSize, getTxValid} = - CBOR.encodeListLen 4 + encodeTx Tx {getTxId, getTxSize, getTxAdvSize, getTxValid, getTxParent} = + CBOR.encodeListLen 5 <> CBOR.encodeInt getTxId <> CBOR.encodeWord32 (getSizeInBytes getTxSize) <> CBOR.encodeWord32 
(getSizeInBytes getTxAdvSize) <> CBOR.encodeBool getTxValid + <> encodeMaybeInt getTxParent decodeTx = do _ <- CBOR.decodeListLen @@ -232,6 +258,17 @@ txSubmissionCodec2 = <*> (SizeInBytes <$> CBOR.decodeWord32) <*> (SizeInBytes <$> CBOR.decodeWord32) <*> CBOR.decodeBool + <*> decodeMaybeInt + + encodeMaybeInt Nothing = CBOR.encodeListLen 0 + encodeMaybeInt (Just i) = CBOR.encodeListLen 1 <> CBOR.encodeInt i + + decodeMaybeInt = do + n <- CBOR.decodeListLen + case n of + 0 -> pure Nothing + 1 -> Just <$> CBOR.decodeInt + _ -> fail "decodeMaybeInt: unexpected list length" newtype LargeNonEmptyList a = LargeNonEmpty { getLargeNonEmpty :: [a] } From 7d36b87cee153fd6f806cee9f72f448405b5142e Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Wed, 1 Apr 2026 10:26:22 +0200 Subject: [PATCH 04/67] WIP: Remove central decision from tx-submission v2 Replace the central decision thread by having server threads coordinate between them by blocking on STM actions. --- .../Test/Cardano/Network/Diffusion/Testnet.hs | 19 +- .../Diffusion/Testnet/MiniProtocols.hs | 26 +- ouroboros-network/bench/Main.hs | 12 +- .../Network/TxSubmission/Inbound/V2.hs | 431 +- .../TxSubmission/Inbound/V2/Decision.hs | 417 -- .../Network/TxSubmission/Inbound/V2/Policy.hs | 11 +- .../TxSubmission/Inbound/V2/Registry.hs | 1061 ++--- .../Network/TxSubmission/Inbound/V2/State.hs | 1617 +++++--- .../Network/TxSubmission/Inbound/V2/Types.hs | 767 ++-- .../Ouroboros/Network/OrphanInstances.hs | 94 +- ouroboros-network/ouroboros-network.cabal | 6 +- .../Test/Ouroboros/Network/Diffusion/Node.hs | 18 +- .../Network/Diffusion/Node/Kernel.hs | 24 +- .../Ouroboros/Network/TxSubmission/AppV2.hs | 155 +- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 3517 ++++++++++------- .../Ouroboros/Network/Tracing/TxSubmission.hs | 130 +- .../Network/Tracing/TxSubmission/Inbound.hs | 23 +- 17 files changed, 4714 insertions(+), 3614 deletions(-) delete mode 100644
ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Decision.hs diff --git a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs index e73e0a8cdba..0bb1222691f 100644 --- a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs +++ b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs @@ -37,6 +37,7 @@ import Data.Dynamic (fromDynamic) import Data.Foldable (fold, foldr') import Data.Functor (void) import Data.IP qualified as IP +import Data.IntMap.Strict qualified as IntMap import Data.List (intercalate, sort) import Data.List qualified as List import Data.List.Trace qualified as Trace @@ -1322,13 +1323,29 @@ prop_check_inflight_ratio bi ds@(DiffusionScript simArgs _ _) = $ Signal.eventsToList $ Signal.selectEvents (\case - DiffusionTxLogic (TraceSharedTxState _ d) -> Just (inflightTxs d) + DiffusionTxLogic (TraceSharedTxState _ d) -> Just (inflightAttemptCounts d) _ -> Nothing ) events txDecisionPolicy = saTxDecisionPolicy simArgs + inflightAttemptCounts :: SharedTxState NtNAddr Int -> Map Int Int + inflightAttemptCounts SharedTxState { sharedTxTable, sharedKeyToTxId } = + Map.fromList + [ (txid, activeAttemptCount txEntry) + | (txKey, txEntry) <- IntMap.toList sharedTxTable + , Just txid <- [IntMap.lookup txKey sharedKeyToTxId] + ] + + activeAttemptCount :: TxEntry peeraddr -> Int + activeAttemptCount TxEntry { txAttempts } = + length + [ () + | attempt <- Map.elems txAttempts + , attempt == TxDownloading || attempt == TxBuffered + ] + in tabulate "Max observeed ratio of inflight multiplicity by the max stipulated by the policy" (map (\m -> "has " ++ show m ++ " in flight - ratio: " ++ show @(Ratio Int) (fromIntegral m / fromIntegral (txInflightMultiplicity txDecisionPolicy)) diff --git a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs 
b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs index 6f56fa53972..6326d6daf17 100644 --- a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs +++ b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs @@ -109,7 +109,7 @@ import Ouroboros.Network.TxSubmission.Inbound.V2 (TxSubmissionInitDelay (..), txSubmissionInboundV2) import Ouroboros.Network.TxSubmission.Inbound.V2.Policy (TxDecisionPolicy (..)) import Ouroboros.Network.TxSubmission.Inbound.V2.Registry (SharedTxStateVar, - TxChannelsVar, TxMempoolSem, withPeer) + TxSubmissionCountersVar, withPeer) import Ouroboros.Network.TxSubmission.Inbound.V2.Types (TraceTxLogic, TraceTxSubmissionInbound) import Ouroboros.Network.TxSubmission.Outbound (txSubmissionOutbound) @@ -280,7 +280,7 @@ applications :: forall block header s m. -> Diffusion.Applications NtNAddr NtNVersion NtNVersionData NtCAddr NtCVersion NtCVersionData PeerTrustable m () -applications debugTracer txSubmissionInboundTracer txSubmissionInboundDebug nodeKernel +applications debugTracer txSubmissionInboundTracer _txSubmissionInboundDebug nodeKernel Codecs { chainSyncCodec, blockFetchCodec , keepAliveCodec, pingPongCodec , peerSharingCodec @@ -382,8 +382,7 @@ applications debugTracer txSubmissionInboundTracer txSubmissionInboundDebug node InitiatorAndResponderProtocol (txSubmissionInitiator aaTxDecisionPolicy (nkMempool nodeKernel)) (txSubmissionResponder (nkMempool nodeKernel) - (nkTxChannelsVar nodeKernel) - (nkTxMempoolSem nodeKernel) + (nkTxCountersVar nodeKernel) (nkSharedTxStateVar nodeKernel)) } ] @@ -715,27 +714,24 @@ applications debugTracer txSubmissionInboundTracer txSubmissionInboundDebug node txSubmissionResponder :: Mempool m TxId (Tx TxId) - -> TxChannelsVar m NtNAddr Int (Tx Int) - -> TxMempoolSem m - -> SharedTxStateVar m NtNAddr Int (Tx Int) + -> TxSubmissionCountersVar m + -> SharedTxStateVar m NtNAddr Int -> MiniProtocolCb 
(ResponderContext NtNAddr) ByteString m () - txSubmissionResponder mempool txChannelsVar txMempoolSem sharedTxStateVar = + txSubmissionResponder mempool txCountersVar sharedTxStateVar = MiniProtocolCb $ \ ResponderContext { rcConnectionId = connId@ConnectionId { remoteAddress = them }} channel -> do - withPeer txSubmissionInboundDebug - txChannelsVar - txMempoolSem - aaTxDecisionPolicy - sharedTxStateVar + withPeer aaTxDecisionPolicy (getMempoolReader mempool) - (getMempoolWriter duplicateTxVar mempool) - getTxSize + sharedTxStateVar + txCountersVar them $ \api -> do let server = txSubmissionInboundV2 txSubmissionInboundTracer NoTxSubmissionInitDelay + (getMempoolReader mempool) (getMempoolWriter duplicateTxVar mempool) + getTxSize api labelThisThread "TxSubmissionServer" runPipelinedPeerWithLimits diff --git a/ouroboros-network/bench/Main.hs b/ouroboros-network/bench/Main.hs index cbfbd6a6235..db7b6a15d28 100644 --- a/ouroboros-network/bench/Main.hs +++ b/ouroboros-network/bench/Main.hs @@ -14,9 +14,11 @@ import System.Random.SplitMix qualified as SM import Test.Tasty.Bench import Text.Pretty.Simple (pPrint) -import Ouroboros.Network.TxSubmission.Inbound.V2.Decision qualified as Tx -import Ouroboros.Network.TxSubmission.Inbound.V2.Policy -import Ouroboros.Network.TxSubmission.Inbound.V2.State (SharedTxState (..)) +-- Disabled pending a replacement benchmark suite for the peer-driven V2 +-- tx-submission scheduler. +-- import Ouroboros.Network.TxSubmission.Inbound.V2.Decision qualified as Tx +-- import Ouroboros.Network.TxSubmission.Inbound.V2.Policy +-- import Ouroboros.Network.TxSubmission.Inbound.V2.State (SharedTxState (..)) import Test.Ouroboros.Network.PeerSelection.PeerMetric (microbenchmark1GenerateInput, microbenchmark1ProcessInput) @@ -35,6 +37,7 @@ main = , env (microbenchmark1GenerateInput False 100_000) $ \i -> bench "100k" $ nfAppIO microbenchmark1ProcessInput i ] + {- Disabled until TxLogic has replacement peer-driven V2 benchmarks. 
, bgroup "TxLogic" [ env (do let a = TX.mkDecisionContext (SM.mkSMGen 131) 10 evaluate (rnf a) @@ -88,7 +91,6 @@ main = f = flip (uncurry Tx.makeDecisions) (peerTxStates state) in nf f a ) -{- , env (do smGen <- SM.initSMGen print smGen @@ -101,7 +103,7 @@ main = bench "makeDecisions: random" $ nf (uncurry Tx.makeDecisions) a ) --} ] + -} ] ] diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs index cf25071361e..4d3674f30fb 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs @@ -1,4 +1,3 @@ -{-# LANGUAGE BangPatterns #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE KindSignatures #-} @@ -7,32 +6,39 @@ {-# LANGUAGE ScopedTypeVariables #-} module Ouroboros.Network.TxSubmission.Inbound.V2 - ( -- * TxSubmision Inbound client + ( -- * TxSubmission Inbound client txSubmissionInboundV2 -- * Supporting types and APIs , module V2 , TxDecisionPolicy (..) , defaultTxDecisionPolicy + , TxSubmissionInitDelay (..) 
) where +import Data.Functor (void) import Data.List.NonEmpty qualified as NonEmpty import Data.Map.Strict qualified as Map import Data.Sequence.Strict qualified as StrictSeq import Data.Set qualified as Set +import Data.Typeable (Typeable) -import Control.Exception (assert) -import Control.Monad (unless, when) +import Control.Concurrent.Class.MonadSTM.Strict +import Control.Monad (unless) import Control.Monad.Class.MonadThrow +import Control.Monad.Class.MonadTime.SI import Control.Monad.Class.MonadTimer.SI import Control.Tracer (Tracer, traceWith) import Network.TypedProtocol import Ouroboros.Network.Protocol.TxSubmission2.Server +import Ouroboros.Network.Protocol.TxSubmission2.Type (NumTxIdsToAck, + SizeInBytes) import Ouroboros.Network.TxSubmission.Inbound.V2.Policy import Ouroboros.Network.TxSubmission.Inbound.V2.Registry as V2 -import Ouroboros.Network.TxSubmission.Inbound.V2.State import Ouroboros.Network.TxSubmission.Inbound.V2.Types as V2 +import Ouroboros.Network.TxSubmission.Mempool.Reader + -- | A tx-submission inbound side (server, sic!). -- @@ -44,162 +50,325 @@ import Ouroboros.Network.TxSubmission.Inbound.V2.Types as V2 txSubmissionInboundV2 :: forall txid tx idx m err. 
( MonadDelay m + , MonadSTM m , MonadThrow m , Ord txid + , Show txid + , Typeable txid ) => Tracer m (TraceTxSubmissionInbound txid tx) -> TxSubmissionInitDelay + -> TxSubmissionMempoolReader txid tx idx m -> TxSubmissionMempoolWriter txid tx idx m err + -> (tx -> SizeInBytes) -> PeerTxAPI m txid tx -> TxSubmissionServerPipelined txid tx m () txSubmissionInboundV2 tracer initDelay - TxSubmissionMempoolWriter { txId } + TxSubmissionMempoolReader { mempoolGetSnapshot } + TxSubmissionMempoolWriter { txId, mempoolAddTxs } + txSize PeerTxAPI { - readTxDecision, - handleReceivedTxIds, - handleReceivedTxs, - submitTxToMempool - } - = + awaitSharedChange, + runNextPeerAction, + runNextPeerActionPipelined, + applyReceivedTxIds, + applyReceivedTxs, + applySubmittedTxs, + countRejectedTxs, + resolveTxRequest, + resolveBufferedTxs, + startSubmittingTxs, + addCounters + } = TxSubmissionServerPipelined $ do + -- The pipelined server API does not thread a user state parameter through + -- `ServerStIdle`. Multiple continuations here resume after network IO and + -- must all access and update the latest peer-local state, so a plain + -- `PeerTxLocalState` captured in closures would go stale. + -- + -- No other threads access the peer's peerStateVar. + peerStateVar <- newTVarIO emptyPeerTxLocalState case initDelay of TxSubmissionInitDelay delay -> threadDelay delay NoTxSubmissionInitDelay -> return () - serverIdle + serverIdle peerStateVar where - serverIdle - :: m (ServerStIdle Z txid tx m ()) - serverIdle = do - -- Block on next decision. 
- txd@TxDecision { txdTxsToRequest = txsToRequest, - txdTxsToMempool = TxsToMempool { listOfTxsToMempool } } - <- readTxDecision - traceWith tracer (TraceTxInboundDecision txd) - - let !collected = length listOfTxsToMempool - - -- Only attempt to add TXs if we have some work to do - when (collected > 0) $ do - -- submitTxToMempool traces: - -- * `TraceTxSubmissionProcessed`, - -- * `TraceTxInboundAddedToMempool`, and - -- * `TraceTxInboundRejectedFromMempool` - -- events. - mapM_ (uncurry $ submitTxToMempool tracer) listOfTxsToMempool - - -- TODO: - -- We can update the state so that other `tx-submission` servers will - -- not try to add these txs to the mempool. - if Map.null txsToRequest - then serverReqTxIds Zero txd - else serverReqTxs txd - - -- Pipelined request of txs - serverReqTxs :: TxDecision txid tx - -> m (ServerStIdle Z txid tx m ()) - serverReqTxs txd@TxDecision { txdTxsToRequest = txdTxsToRequest } = - pure $ SendMsgRequestTxsPipelined txdTxsToRequest - (serverReqTxIds (Succ Zero) txd) + -- Entry point and reset state for the non-pipelined server loop. + -- + -- This function is called when: + -- 1. The server first starts + -- 2. All pipelined requests have completed and the counter returns to zero + -- 3. 
An idle peer wakes up after a @PeerDoNothing@ wait + serverIdle :: StrictTVar m (PeerTxLocalState tx) + -> m (ServerStIdle Z txid tx m ()) + serverIdle peerStateVar = do + peerState <- readTVarIO peerStateVar + + now <- getMonotonicTime + (peerAction, peerState') <- runNextPeerAction now peerState + atomically $ writeTVar peerStateVar peerState' + case peerAction of + PeerDoNothing generation mDelay -> do + awaitSharedChange generation mDelay + serverIdle peerStateVar + PeerSubmitTxs txKeys -> + submitBufferedTxs peerStateVar txKeys (serverIdle peerStateVar) + PeerRequestTxs txKeys -> + requestTxBodies peerStateVar Zero txKeys + PeerRequestTxIds txIdsToAck txIdsToReq -> do + serverReqTxIds peerStateVar Zero txIdsToAck txIdsToReq + + -- | Submit buffered transaction bodies to the mempool. + submitBufferedTxs :: forall (n :: N). + StrictTVar m (PeerTxLocalState tx) + -> [TxKey] + -> m (ServerStIdle n txid tx m ()) + -> m (ServerStIdle n txid tx m ()) + submitBufferedTxs peerStateVar txKeys k = do + + peerState <- readTVarIO peerStateVar + bufferedTxs <- resolveBufferedTxs peerState txKeys + + -- Flags the txs as on the way to the mempool, which temporarily blocks further + -- download attempts. + startSubmittingTxs txKeys + + start <- getMonotonicTime + MempoolSnapshot { mempoolHasTx } <- atomically mempoolGetSnapshot + + -- Note that checking if the mempool contains a TX before + -- spending several ms attempting to add it to the pool has + -- been judged immoral. 
+ let (alreadyInMempool, pendingSubmit) = partitionBufferedTxs mempoolHasTx bufferedTxs + submitted = [ (txKey, txid') | (txKey, txid', _) <- pendingSubmit ] + toSubmit = [ tx | (_, _, tx) <- pendingSubmit ] + + (acceptedTxIds, _) <- if null toSubmit + then pure ([], []) + else mempoolAddTxs toSubmit + end <- getMonotonicTime + + let (acceptedTxs, rejectedTxs) = + classifySubmittedTxs submitted (Set.fromList acceptedTxIds) + resolvedTxKeys = [ txKey | (txKey, _, _) <- alreadyInMempool ] <> fmap fst acceptedTxs + rejectedForTrace = [ txid' | (_, txid', _) <- alreadyInMempool ] <> fmap snd rejectedTxs + rejectedCount = length rejectedForTrace + delta = end `diffTime` start + + score <- countRejectedTxs end rejectedCount + traceWith tracer $ + TraceTxSubmissionProcessed ProcessedTxCount { + ptxcAccepted = length acceptedTxs, + ptxcRejected = rejectedCount, + ptxcScore = score + } + unless (null acceptedTxs) $ + traceWith tracer (TraceTxInboundAddedToMempool (snd <$> acceptedTxs) delta) + unless (null rejectedForTrace) $ + traceWith tracer (TraceTxInboundRejectedFromMempool rejectedForTrace delta) + + peerState' <- applySubmittedTxs end resolvedTxKeys (fmap fst rejectedTxs) peerState + atomically $ writeTVar peerStateVar peerState' + k + + -- Request transaction bodies from the peer. + requestTxBodies :: forall (n :: N). + StrictTVar m (PeerTxLocalState tx) + -> Nat n + -> [TxKey] + -> m (ServerStIdle n txid tx m ()) + requestTxBodies peerStateVar n txKeys = do + peerState <- readTVarIO peerStateVar + txsToRequest <- resolveTxRequest peerState txKeys + traceWith tracer (TraceTxInboundRequestTxs (Map.keys txsToRequest)) + addCounters mempty { txMessagesSent = 1 + , txsRequested = fromIntegral (Map.size txsToRequest) } + pure $ SendMsgRequestTxsPipelined txsToRequest + (continueAfterBodyRequests peerStateVar (Succ n)) + -- Continue processing after receiving replies from the peer in pipelined mode. + continueAfterReplies :: forall (n :: N). 
+ StrictTVar m (PeerTxLocalState tx) + -> Nat n + -> m (ServerStIdle n txid tx m ()) + continueAfterReplies peerStateVar Zero = serverIdle peerStateVar + continueAfterReplies peerStateVar n@Succ{} = do + peerState <- readTVarIO peerStateVar + now <- getMonotonicTime + (peerAction, peerState') <- runNextPeerActionPipelined now peerState + atomically $ writeTVar peerStateVar peerState' + case peerAction of + PeerSubmitTxs txKeys -> + submitBufferedTxs peerStateVar txKeys (continueAfterReplies peerStateVar n) + PeerRequestTxs txKeys -> + requestTxBodies peerStateVar n txKeys + PeerRequestTxIds txIdsToAck txIdsToReq -> + serverReqTxIds peerStateVar n txIdsToAck txIdsToReq + PeerDoNothing {} -> + handleReplies peerStateVar n + + -- Continue processing after receiving transaction body replies in pipelined mode. + continueAfterBodyRequests :: forall (n :: N). + StrictTVar m (PeerTxLocalState tx) + -> Nat (S n) + -> m (ServerStIdle (S n) txid tx m ()) + continueAfterBodyRequests peerStateVar n = do + peerState <- readTVarIO peerStateVar + now <- getMonotonicTime + (peerAction, peerState') <- runNextPeerActionPipelined now peerState + atomically $ writeTVar peerStateVar peerState' + case peerAction of + PeerSubmitTxs txKeys -> + submitBufferedTxs peerStateVar txKeys (continueAfterReplies peerStateVar n) + PeerRequestTxs txKeys -> + requestTxBodies peerStateVar n txKeys + PeerRequestTxIds txIdsToAck txIdsToReq -> do + serverReqTxIds peerStateVar n txIdsToAck txIdsToReq + PeerDoNothing {} -> + handleReplies peerStateVar n + + -- Construct and send a txid request message to the peer. serverReqTxIds :: forall (n :: N). 
- Nat n - -> TxDecision txid tx + StrictTVar m (PeerTxLocalState tx) + -> Nat n + -> NumTxIdsToAck + -> NumTxIdsToReq -> m (ServerStIdle n txid tx m ()) - serverReqTxIds - n TxDecision { txdTxIdsToRequest = 0 } - = - case n of - Zero -> serverIdle - Succ _ -> handleReplies n - - serverReqTxIds - -- if there are no unacknowledged txids, the protocol requires sending - -- a blocking `MsgRequestTxIds` request. This is important, as otherwise - -- the client side wouldn't have a chance to terminate the - -- mini-protocol. - Zero TxDecision { txdTxIdsToAcknowledge = txIdsToAck, - txdPipelineTxIds = False, - txdTxIdsToRequest = txIdsToReq - } - = - pure $ SendMsgRequestTxIdsBlocking - txIdsToAck txIdsToReq - -- Our result if the client terminates the protocol - (traceWith tracer TraceTxInboundTerminated) - (\txids -> do - let txids' = NonEmpty.toList txids - txidsSeq = StrictSeq.fromList $ fst <$> txids' - txidsMap = Map.fromList txids' - unless (StrictSeq.length txidsSeq <= fromIntegral txIdsToReq) $ - throwIO ProtocolErrorTxIdsNotRequested - handleReceivedTxIds txIdsToReq txidsSeq txidsMap - serverIdle - ) - - serverReqTxIds - n@Zero TxDecision { txdTxIdsToAcknowledge = txIdsToAck, - txdPipelineTxIds = True, - txdTxIdsToRequest = txIdsToReq - } - = - pure $ SendMsgRequestTxIdsPipelined - txIdsToAck txIdsToReq - (handleReplies (Succ n)) - - serverReqTxIds - n@Succ{} TxDecision { txdTxIdsToAcknowledge = txIdsToAck, - txdPipelineTxIds, - txdTxIdsToRequest = txIdsToReq - } - = - -- it is impossible that we have had `tx`'s to request (Succ{} - is an - -- evidence for that), but no unacknowledged `txid`s. 
- assert txdPipelineTxIds $ - pure $ SendMsgRequestTxIdsPipelined - txIdsToAck txIdsToReq - (handleReplies (Succ n)) + -- No requests pending; transitions back to @serverIdle@ + serverReqTxIds peerStateVar Zero 0 0 = serverIdle peerStateVar + + -- Requests complete but pipeline not empty, continues to + -- @handleReplies@ to process remaining in-flight replies + serverReqTxIds peerStateVar n@Succ{} 0 0 = handleReplies peerStateVar n + -- Non-pipelined request, may send a blocking request + serverReqTxIds peerStateVar Zero txIdsToAck txIdsToReq = do + peerState <- readTVarIO peerStateVar + addCounters mempty { txIdMessagesSent = 1 + , txIdsRequested = fromIntegral txIdsToReq } + if StrictSeq.null (peerUnacknowledgedTxIds peerState) + then + pure $ SendMsgRequestTxIdsBlocking + txIdsToAck + txIdsToReq + (traceWith tracer TraceTxInboundTerminated) + (\txids -> do + now <- getMonotonicTime + let txids' = NonEmpty.toList txids + unless (length txids' <= fromIntegral txIdsToReq) $ + throwIO ProtocolErrorTxIdsNotRequested + addCounters mempty { txIdRepliesReceived = 1 + , txIdsReceived = fromIntegral (length txids') } + peerStateCurrent <- readTVarIO peerStateVar + peerState' <- applyReceivedTxIds now txIdsToReq txids' peerStateCurrent + atomically $ writeTVar peerStateVar peerState' + serverIdle peerStateVar) + else + pure $ SendMsgRequestTxIdsPipelined + txIdsToAck + txIdsToReq + (handleReplies peerStateVar (Succ Zero)) + -- Pipelined request at depth > 0. Sends a pipelined message and continues + -- to @handleReplies@. + serverReqTxIds peerStateVar n@Succ{} txIdsToAck txIdsToReq = do + addCounters mempty { txIdMessagesSent = 1 + , txIdsRequested = fromIntegral txIdsToReq } + pure $ SendMsgRequestTxIdsPipelined + txIdsToAck + txIdsToReq + (handleReplies peerStateVar (Succ n)) + + -- Prepare to collect pipelined replies from the peer. handleReplies :: forall (n :: N). 
- Nat (S n) - -> m (ServerStIdle (S n) txid tx m ()) - handleReplies (Succ n'@Succ{}) = - pure $ CollectPipelined - Nothing - (handleReply (handleReplies n')) - - handleReplies (Succ Zero) = - pure $ CollectPipelined - Nothing - (handleReply serverIdle) + StrictTVar m (PeerTxLocalState tx) + -> Nat (S n) + -> m (ServerStIdle (S n) txid tx m ()) + handleReplies peerStateVar (Succ Zero) = + pure $ CollectPipelined Nothing (handleReply peerStateVar Zero) + + handleReplies peerStateVar (Succ n'@Succ{}) = + pure $ CollectPipelined Nothing (handleReply peerStateVar n') + -- Process a single pipelined reply from the peer. handleReply :: forall (n :: N). - m (ServerStIdle n txid tx m ()) - -- continuation + StrictTVar m (PeerTxLocalState tx) + -> Nat n -> Collect txid tx -> m (ServerStIdle n txid tx m ()) - handleReply k = \case + handleReply peerStateVar n = \case CollectTxIds txIdsToReq txids -> do - let txidsSeq = StrictSeq.fromList $ fst <$> txids - txidsMap = Map.fromList txids - unless (StrictSeq.length txidsSeq <= fromIntegral txIdsToReq) $ + unless (length txids <= fromIntegral txIdsToReq) $ throwIO ProtocolErrorTxIdsNotRequested - handleReceivedTxIds txIdsToReq txidsSeq txidsMap - k - CollectTxs txids txs -> do - let requested = Map.keysSet txids - received = Map.fromList [ (txId tx, tx) | tx <- txs ] + addCounters mempty { txIdRepliesReceived = 1 + , txIdsReceived = fromIntegral (length txids) } + peerState <- readTVarIO peerStateVar + now <- getMonotonicTime + peerState' <- applyReceivedTxIds now txIdsToReq txids peerState + atomically $ writeTVar peerStateVar peerState' + continueAfterReplies peerStateVar n - unless (Map.keysSet received `Set.isSubsetOf` requested) $ + CollectTxs requested txs -> do + let received = Map.fromList [ (txId tx, tx) | tx <- txs ] + wrongSizedTxs = collectWrongSizedTxs requested received + unless (Map.keysSet received `Set.isSubsetOf` Map.keysSet requested) $ throwIO ProtocolErrorTxNotRequested + traceWith tracer $ 
TraceTxSubmissionCollected (txId <$> txs) + unless (null wrongSizedTxs) $ do + let protocolError = ProtocolErrorTxSizeError wrongSizedTxs + traceWith tracer (TraceTxInboundError protocolError) + throwIO protocolError + peerState <- readTVarIO peerStateVar + now <- getMonotonicTime + (penaltyCount, peerState') <- applyReceivedTxs now [ (txId tx, tx) | tx <- txs ] peerState + atomically $ writeTVar peerStateVar peerState' + unless (penaltyCount == 0) $ + void $ countRejectedTxs now penaltyCount + continueAfterReplies peerStateVar n + + -- Partition submitted transactions into accepted and rejected groups + classifySubmittedTxs :: [(TxKey, txid)] + -> Set.Set txid + -> ([(TxKey, txid)], [(TxKey, txid)]) + classifySubmittedTxs submitted accepted = + foldr classify ([], []) submitted + where + classify entry@(_, txid') (acceptedTxs, rejectedTxs) + | Set.member txid' accepted = (entry : acceptedTxs, rejectedTxs) + | otherwise = (acceptedTxs, entry : rejectedTxs) + + -- Partition buffered transactions by mempool presence. + partitionBufferedTxs :: (txid -> Bool) + -> [(TxKey, txid, tx)] + -> ([(TxKey, txid, tx)], [(TxKey, txid, tx)]) + partitionBufferedTxs mempoolHasTx = + foldr step ([], []) + where + step entry@(_, txid', _) (alreadyInMempool, pendingSubmit) + | mempoolHasTx txid' = (entry : alreadyInMempool, pendingSubmit) + | otherwise = (alreadyInMempool, entry : pendingSubmit) + + -- Collect transactions with size mismatches between advertised and actual. 
+ collectWrongSizedTxs :: Map.Map txid SizeInBytes + -> Map.Map txid tx + -> [(txid, SizeInBytes, SizeInBytes)] + collectWrongSizedTxs requestedTxIds receivedTxs = + [ (txid', receivedSize, advertisedSize) + | (txid', tx) <- Map.toList receivedTxs + , let receivedSize = txSize tx + , Just advertisedSize <- [Map.lookup txid' requestedTxIds] + , not (checkTxSize receivedSize advertisedSize) + ] - mbe <- handleReceivedTxs txids received - traceWith tracer $ TraceTxSubmissionCollected (txId `map` txs) - case mbe of - -- one of `tx`s had a wrong size - Just e -> traceWith tracer (TraceTxInboundError e) - >> throwIO e - Nothing -> k + -- Fuzzy size comparison that allows for +/- const_MAX_TX_SIZE_DISCREPANCY. + checkTxSize :: SizeInBytes + -> SizeInBytes + -> Bool + checkTxSize received advertised + | received > advertised = + received - advertised <= const_MAX_TX_SIZE_DISCREPANCY + | otherwise = + advertised - received <= const_MAX_TX_SIZE_DISCREPANCY diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Decision.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Decision.hs deleted file mode 100644 index 552d2f728e9..00000000000 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Decision.hs +++ /dev/null @@ -1,417 +0,0 @@ -{-# LANGUAGE BangPatterns #-} -{-# LANGUAGE BlockArguments #-} -{-# LANGUAGE DerivingVia #-} -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE NamedFieldPuns #-} -{-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE TupleSections #-} -{-# OPTIONS_GHC -Wno-unrecognised-pragmas #-} - -module Ouroboros.Network.TxSubmission.Inbound.V2.Decision - ( TxDecision (..) 
- , emptyTxDecision - -- * Internal API exposed for testing - , makeDecisions - , filterActivePeers - , pickTxsToDownload - ) where - -import Control.Arrow ((>>>)) -import Control.Exception (assert) - -import Data.Bifunctor (second) -import Data.Hashable -import Data.List qualified as List -import Data.Map.Merge.Strict qualified as Map -import Data.Map.Strict (Map) -import Data.Map.Strict qualified as Map -import Data.Maybe (mapMaybe) -import Data.Set (Set) -import Data.Set qualified as Set -import System.Random (random) - -import Data.Sequence.Strict qualified as StrictSeq -import Ouroboros.Network.Protocol.TxSubmission2.Type -import Ouroboros.Network.TxSubmission.Inbound.V2.Policy -import Ouroboros.Network.TxSubmission.Inbound.V2.State -import Ouroboros.Network.TxSubmission.Inbound.V2.Types - - --- | Make download decisions. --- -makeDecisions - :: forall peeraddr txid tx. - ( Ord peeraddr - , Ord txid - , Hashable peeraddr - ) - => TxDecisionPolicy - -- ^ decision policy - -> SharedTxState peeraddr txid tx - -- ^ decision context - -> Map peeraddr (PeerTxState txid tx) - -- ^ list of available peers. - -- - -- This is a subset of `peerTxStates` of peers which either: - -- * can be used to download a `tx`, - -- * can acknowledge some `txid`s. - -- - -> ( SharedTxState peeraddr txid tx - , Map peeraddr (TxDecision txid tx) - ) -makeDecisions policy st = - let (salt, rng') = random (peerRng st) - st' = st { peerRng = rng' } - in fn - . pickTxsToDownload policy st' - . orderByRejections salt - where - fn :: forall a. - (a, [(peeraddr, TxDecision txid tx)]) - -> (a, Map peeraddr (TxDecision txid tx)) - fn (a, as) = (a, Map.fromList as) - - --- | Order peers by how useful the TXs they have provided are. --- --- TXs delivered late will fail to apply because they were included in --- a recently adopted block. Peers can race against each other by setting --- `txInflightMultiplicity` to > 1. In case of a tie a hash of the peeraddr --- is used as a tie breaker. 
Since every invocation use a new salt a given --- peeraddr does not have an advantage over time. --- -orderByRejections :: Hashable peeraddr - => Int - -> Map peeraddr (PeerTxState txid tx) - -> [(peeraddr, PeerTxState txid tx)] -orderByRejections salt = - List.sortOn (\(peeraddr, ps) -> (score ps, hashWithSalt salt peeraddr)) - . Map.toList - - --- | Internal state of `pickTxsToDownload` computation. --- -data St peeraddr txid tx = - St { stInflight :: !(Map txid Int), - -- ^ `txid`s in-flight. - - stAcknowledged :: !(Map txid Int), - -- ^ acknowledged `txid` with multiplicities. It is used to update - -- `referenceCounts`. - - stInSubmissionToMempoolTxs :: !(Set txid) - -- ^ TXs on their way to the mempool. Used to prevent issueing new - -- fetch requests for them. - } - - --- | Distribute `tx`'s to download among available peers. Peers are considered --- in the given order. --- --- * pick txs from the set of available tx's (in `txid` order, note these sets --- might be different for different peers). --- * pick txs until the peers in-flight limit (we can go over the limit by one tx) --- (`txsSizeInflightPerPeer` limit) --- * each tx can be downloaded simultaneously from at most --- `txInflightMultiplicity` peers. --- -pickTxsToDownload - :: forall peeraddr txid tx. 
- ( Ord peeraddr - , Ord txid - ) - => TxDecisionPolicy - -- ^ decision policy - -> SharedTxState peeraddr txid tx - -- ^ shared state - - -> [(peeraddr, PeerTxState txid tx)] - -> ( SharedTxState peeraddr txid tx - , [(peeraddr, TxDecision txid tx)] - ) - -pickTxsToDownload policy@TxDecisionPolicy { txsSizeInflightPerPeer, - txInflightMultiplicity } - sharedState@SharedTxState { peerTxStates, - inflightTxs, - bufferedTxs, - inSubmissionToMempoolTxs, - referenceCounts } = - -- outer fold: fold `[(peeraddr, PeerTxState txid tx)]` - List.mapAccumR - accumFn - -- initial state - St { stInflight = inflightTxs, - stAcknowledged = Map.empty, - stInSubmissionToMempoolTxs = Map.keysSet inSubmissionToMempoolTxs } - - >>> - gn - where - accumFn :: St peeraddr txid tx - -> (peeraddr, PeerTxState txid tx) - -> ( St peeraddr txid tx - , ( (peeraddr, PeerTxState txid tx) - , TxDecision txid tx - ) - ) - accumFn - st@St { stInflight, - stAcknowledged, - stInSubmissionToMempoolTxs } - ( peeraddr - , peerTxState@PeerTxState { availableTxIds, - unknownTxs, - requestedTxsInflight, - requestedTxsInflightSize - } - ) - = - let requestedTxsInflightSize' :: SizeInBytes - txsToRequestMap :: Map txid SizeInBytes - - (requestedTxsInflightSize', txsToRequestMap) = - -- inner fold: fold available `txid`s - -- - -- Note: although `Map.foldrWithKey` could be used here, it - -- does not allow to short circuit the fold, unlike - -- `foldWithState`. - foldWithState - (\(txid, (txSize, inflightMultiplicity)) sizeInflight -> - if -- note that we pick `txid`'s as long the `s` is - -- smaller or equal to `txsSizeInflightPerPeer`. 
- sizeInflight <= txsSizeInflightPerPeer - -- the transaction must not be downloaded from more - -- than `txInflightMultiplicity` peers simultaneously - && inflightMultiplicity < txInflightMultiplicity - -- TODO: we must validate that `txSize` is smaller than - -- maximum txs size - then Just (sizeInflight + txSize, (txid, txSize)) - else Nothing - ) - (Map.assocs $ - -- merge `availableTxIds` with `stInflight`, so we don't - -- need to lookup into `stInflight` on every `txid` which - -- is in `availableTxIds`. - Map.merge (Map.mapMaybeMissing \_txid -> Just . (,0)) - Map.dropMissing - (Map.zipWithMatched \_txid -> (,)) - - availableTxIds - stInflight - -- remove `tx`s which were already downloaded by some - -- other peer or are in-flight or unknown by this peer. - `Map.withoutKeys` ( - Map.keysSet bufferedTxs - <> requestedTxsInflight - <> unknownTxs - <> stInSubmissionToMempoolTxs - ) - ) - requestedTxsInflightSize - -- pick from `txid`'s which are available from that given - -- peer. Since we are folding a dictionary each `txid` - -- will be selected only once from a given peer (at least - -- in each round). 
- - txsToRequest = Map.keysSet txsToRequestMap - peerTxState' = peerTxState { - requestedTxsInflightSize = requestedTxsInflightSize', - requestedTxsInflight = requestedTxsInflight - <> txsToRequest - } - - ( numTxIdsToAck - , numTxIdsToReq - , txsToMempool@TxsToMempool { listOfTxsToMempool } - , RefCountDiff { txIdsToAck } - , peerTxState'' - ) = acknowledgeTxIds policy sharedState peerTxState' - - stAcknowledged' = Map.unionWith (+) stAcknowledged txIdsToAck - - stInflightDelta :: Map txid Int - stInflightDelta = Map.fromSet (\_ -> 1) txsToRequest - -- note: this is right since every `txid` - -- could be picked at most once - - stInflight' :: Map txid Int - stInflight' = Map.unionWith (+) stInflightDelta stInflight - - stInSubmissionToMempoolTxs' = stInSubmissionToMempoolTxs - <> Set.fromList (map fst listOfTxsToMempool) - in - if requestedTxIdsInflight peerTxState'' > 0 - then - -- we can request `txid`s & `tx`s - ( St { stInflight = stInflight', - stAcknowledged = stAcknowledged', - stInSubmissionToMempoolTxs = stInSubmissionToMempoolTxs' } - , ( (peeraddr, peerTxState'') - , TxDecision { txdTxIdsToAcknowledge = numTxIdsToAck, - txdPipelineTxIds = not - . StrictSeq.null - . unacknowledgedTxIds - $ peerTxState'', - txdTxIdsToRequest = numTxIdsToReq, - txdTxsToRequest = txsToRequestMap, - txdTxsToMempool = txsToMempool - } - ) - ) - else - -- there are no `txid`s to request, only `tx`s. 
- ( st { stInflight = stInflight', - stInSubmissionToMempoolTxs = stInSubmissionToMempoolTxs' - } - , ( (peeraddr, peerTxState'') - , emptyTxDecision { txdTxsToRequest = txsToRequestMap } - ) - ) - - gn :: ( St peeraddr txid tx - , [((peeraddr, PeerTxState txid tx), TxDecision txid tx)] - ) - -> ( SharedTxState peeraddr txid tx - , [(peeraddr, TxDecision txid tx)] - ) - gn - ( St { stInflight, - stAcknowledged } - , as - ) - = - let peerTxStates' = Map.fromList ((\(a,_) -> a) <$> as) - <> peerTxStates - - referenceCounts' = - Map.merge (Map.mapMaybeMissing \_ x -> Just x) - (Map.mapMaybeMissing \_ _ -> assert False Nothing) - (Map.zipWithMaybeMatched \_ x y -> if x > y then Just $! x - y - else Nothing) - referenceCounts - stAcknowledged - - liveSet = Map.keysSet referenceCounts' - - bufferedTxs' = bufferedTxs - `Map.restrictKeys` - liveSet - - inSubmissionToMempoolTxs' = - List.foldl' updateInSubmissionToMempoolTxs inSubmissionToMempoolTxs as - - in ( sharedState { - peerTxStates = peerTxStates', - inflightTxs = stInflight, - bufferedTxs = bufferedTxs', - referenceCounts = referenceCounts', - inSubmissionToMempoolTxs = inSubmissionToMempoolTxs'} - , -- exclude empty results - mapMaybe (\((a, _), b) -> case b of - TxDecision { txdTxIdsToAcknowledge = 0, - txdTxIdsToRequest = 0, - txdTxsToRequest, - txdTxsToMempool = TxsToMempool { listOfTxsToMempool } } - | null txdTxsToRequest - , null listOfTxsToMempool - -> Nothing - _ -> Just (a, b) - ) - as - ) - - where - updateInSubmissionToMempoolTxs - :: forall a. - Map txid Int - -> (a, TxDecision txid tx) - -> Map txid Int - updateInSubmissionToMempoolTxs m (_,TxDecision { txdTxsToMempool } ) = - List.foldl' fn m (listOfTxsToMempool txdTxsToMempool) - where - fn :: Map txid Int - -> (txid,tx) - -> Map txid Int - fn x (txid,_) = Map.alter (\case Nothing -> Just 1 - Just n -> Just $! succ n) txid x - - --- | Filter peers which can either download a `tx` or acknowledge `txid`s. 
--- -filterActivePeers - :: forall peeraddr txid tx. - Ord txid - => TxDecisionPolicy - -> SharedTxState peeraddr txid tx - -> Map peeraddr (PeerTxState txid tx) -filterActivePeers - policy@TxDecisionPolicy { - maxUnacknowledgedTxIds, - txsSizeInflightPerPeer, - txInflightMultiplicity - } - sharedTxState@SharedTxState { - peerTxStates, - bufferedTxs, - inflightTxs, - inSubmissionToMempoolTxs - } = Map.filter gn peerTxStates - where - unrequestable = Map.keysSet (Map.filter (>= txInflightMultiplicity) inflightTxs) - <> Map.keysSet bufferedTxs - - gn :: PeerTxState txid tx -> Bool - gn peerTxState@PeerTxState { unacknowledgedTxIds, - requestedTxIdsInflight, - requestedTxsInflight, - requestedTxsInflightSize, - availableTxIds, - unknownTxs - } = - ( requestedTxIdsInflight == 0 - && requestedTxIdsInflight + numOfUnacked <= maxUnacknowledgedTxIds - && txIdsToRequest > 0 - ) - || (underSizeLimit && not (Map.null downloadable)) - where - numOfUnacked = fromIntegral (StrictSeq.length unacknowledgedTxIds) - underSizeLimit = requestedTxsInflightSize <= txsSizeInflightPerPeer - downloadable = availableTxIds - `Map.withoutKeys` requestedTxsInflight - `Map.withoutKeys` unknownTxs - `Map.withoutKeys` unrequestable - `Map.withoutKeys` Map.keysSet inSubmissionToMempoolTxs - - -- Split `unacknowledgedTxIds'` into the longest prefix of `txid`s which - -- can be acknowledged and the unacknowledged `txid`s. - (txIdsToRequest, _, _) = splitAcknowledgedTxIds policy sharedTxState peerTxState - --- --- Auxiliary functions --- - --- | A fold with state implemented as a `foldr` to take advantage of fold-build --- fusion optimisation. --- -foldWithState - :: forall s a b c. 
- Ord b - => (a -> s -> Maybe (s, (b, c))) - -> [a] -> s -> (s, Map b c) -{-# INLINE foldWithState #-} - -foldWithState f = foldr cons nil - where - cons :: a - -> (s -> (s, Map b c)) - -> (s -> (s, Map b c)) - cons a k = \ !s -> - case f a s of - Nothing -> nil s - Just (!s', (!b, !c)) -> - case Map.insert b c `second` k s' of - r@(!_s, !_bs) -> r - - nil :: s -> (s, Map b c) - nil = \ !s -> (s, Map.empty) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs index a9d2fad9a4e..e52ed3ee9da 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs @@ -45,6 +45,9 @@ data TxDecisionPolicy = TxDecisionPolicy { -- ^ a limit of tx size in-flight from a single peer. -- It can be exceed by max tx size. + maxOutstandingTxBatchesPerPeer :: !Int, + -- ^ a limit of outstanding tx-body request batches from a single peer. + txInflightMultiplicity :: !Int, -- ^ from how many peers download the `txid` simultaneously @@ -55,9 +58,11 @@ data TxDecisionPolicy = TxDecisionPolicy { scoreRate :: !Double, -- ^ rate at which "rejected" TXs drain. Unit: TX/seconds. - scoreMax :: !Double + scoreMax :: !Double, -- ^ Maximum number of "rejections". Unit: seconds + interTxSpace :: !DiffTime + -- ^ space between actual requests for the same TX. 
} deriving Show @@ -70,8 +75,10 @@ defaultTxDecisionPolicy = maxNumTxIdsToRequest = 3, maxUnacknowledgedTxIds = 10, -- must be the same as txSubmissionMaxUnacked txsSizeInflightPerPeer = max_TX_SIZE * 6, + maxOutstandingTxBatchesPerPeer = 2, txInflightMultiplicity = 2, bufferedTxsMinLifetime = 2, scoreRate = 0.1, - scoreMax = 15 * 60 + scoreMax = 15 * 60, + interTxSpace = 0.250 } diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index 3da6691bd00..15f9b56bfc9 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -1,547 +1,600 @@ {-# LANGUAGE BangPatterns #-} -{-# LANGUAGE BlockArguments #-} -{-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} module Ouroboros.Network.TxSubmission.Inbound.V2.Registry - ( TxChannels (..) - , TxChannelsVar - , TxMempoolSem - , SharedTxStateVar - , newSharedTxStateVar - , newTxChannelsVar - , newTxMempoolSem + ( SharedTxStateVar , PeerTxAPI (..) 
- , decisionLogicThreads + , TxSubmissionCountersVar + , newSharedTxStateVar + , newTxSubmissionCountersVar + , txCountersThreadV2 , withPeer + -- Exported for testing + , updatePeerPhase + , updatePeerRequestedTxs ) where -import Control.Concurrent.Class.MonadMVar.Strict +import Control.Concurrent.Class.MonadSTM qualified as Lazy import Control.Concurrent.Class.MonadSTM.Strict -import Control.Concurrent.Class.MonadSTM.TSem -import Control.Exception (assert) -import Control.Monad.Class.MonadAsync -import Control.Monad.Class.MonadFork import Control.Monad.Class.MonadThrow import Control.Monad.Class.MonadTime.SI import Control.Monad.Class.MonadTimer.SI import Control.Tracer (Tracer, traceWith) - -import Data.Foldable as Foldable (foldl', traverse_) -import Data.Hashable +import Data.IntMap.Strict qualified as IntMap +import Data.IntSet qualified as IntSet import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map -import Data.Maybe (fromMaybe) -import Data.Sequence.Strict (StrictSeq) import Data.Sequence.Strict qualified as StrictSeq import Data.Set qualified as Set -import Data.Typeable (Typeable) import Data.Void (Void) +import Data.Word (Word64) import Ouroboros.Network.Protocol.TxSubmission2.Type -import Ouroboros.Network.TxSubmission.Inbound.V2.Decision -import Ouroboros.Network.TxSubmission.Inbound.V2.Policy -import Ouroboros.Network.TxSubmission.Inbound.V2.State +import Ouroboros.Network.TxSubmission.Inbound.V2.Policy (TxDecisionPolicy (..)) +import Ouroboros.Network.TxSubmission.Inbound.V2.State qualified as State import Ouroboros.Network.TxSubmission.Inbound.V2.Types import Ouroboros.Network.TxSubmission.Mempool.Reader --- | Communication channels between `TxSubmission` client mini-protocol and --- decision logic. 
--- -newtype TxChannels m peeraddr txid tx = TxChannels { - txChannelMap :: Map peeraddr (StrictMVar m (TxDecision txid tx)) - } - -type TxChannelsVar m peeraddr txid tx = StrictMVar m (TxChannels m peeraddr txid tx) - -newTxChannelsVar :: MonadMVar m => m (TxChannelsVar m peeraddr txid tx) -newTxChannelsVar = newMVar (TxChannels Map.empty) - -newtype TxMempoolSem m = TxMempoolSem (TSem m) +-- | Shared STM handle for V2 coordination state. +type SharedTxStateVar m peeraddr txid = StrictTVar m (SharedTxState peeraddr txid) + +-- | STM handle for V2 monotonic counters. +type TxSubmissionCountersVar m = StrictTVar m TxSubmissionCounters + +newSharedTxStateVar + :: MonadSTM m + => SharedTxState peeraddr txid + -> m (SharedTxStateVar m peeraddr txid) +newSharedTxStateVar = newTVarIO + +newTxSubmissionCountersVar + :: MonadSTM m + => TxSubmissionCounters + -> m (TxSubmissionCountersVar m) +newTxSubmissionCountersVar = newTVarIO + +-- | Periodically emit the current V2 counters when they change. +txCountersThreadV2 + :: (MonadDelay m, MonadSTM m) + => Tracer m TxSubmissionCounters + -> TxSubmissionCountersVar m + -> m Void +txCountersThreadV2 tracer countersVar = go mempty + where + countersInterval = 7 -newTxMempoolSem :: MonadSTM m => m (TxMempoolSem m) -newTxMempoolSem = TxMempoolSem <$> atomically (newTSem 1) + go !previous = do + threadDelay countersInterval + current <- readTVarIO countersVar + if current /= previous + then traceWith tracer current >> go current + else go previous --- | API to access `PeerTxState` inside `PeerTxStateVar`. +-- | Peer-facing coordination API. -- +-- The peer thread keeps its local protocol state in an local +-- variable. Registry helpers operate only on the shared STM state; any helper +-- that needs peer-local state should take it explicitly as an argument. 
data PeerTxAPI m txid tx = PeerTxAPI { - readTxDecision :: m (TxDecision txid tx), - -- ^ a blocking action which reads `TxDecision` - - handleReceivedTxIds :: NumTxIdsToReq - -> StrictSeq txid - -- ^ received txids - -> Map txid SizeInBytes - -- ^ received sizes of advertised tx's - -> m (), - -- ^ handle received txids - - handleReceivedTxs :: Map txid SizeInBytes - -- ^ requested txids - -> Map txid tx - -- ^ received txs - -> m (Maybe TxSubmissionProtocolError), - -- ^ handle received txs - - submitTxToMempool :: Tracer m (TraceTxSubmissionInbound txid tx) - -> txid -> tx -> m () - -- ^ submit the given (txid, tx) to the mempool. + -- | Wait until either the peer's generation changes from the given + -- value or the optional timeout expires. + awaitSharedChange :: Word64 + -> Maybe DiffTime + -> m (), + + -- | Compute the next action for this peer in non-pipelined mode. + runNextPeerAction :: Time + -> PeerTxLocalState tx + -> m (PeerAction, PeerTxLocalState tx), + + -- | Compute the next action for this peer in pipelined mode. + runNextPeerActionPipelined :: Time + -> PeerTxLocalState tx + -> m (PeerAction, PeerTxLocalState tx), + + -- | Process a batch of txids received from this peer. + applyReceivedTxIds :: Time + -> NumTxIdsToReq + -> [(txid, SizeInBytes)] + -> PeerTxLocalState tx + -> m (PeerTxLocalState tx), + + -- | Process a batch of tx bodies received from this peer. + applyReceivedTxs :: Time + -> [(txid, tx)] + -> PeerTxLocalState tx + -> m (Int, PeerTxLocalState tx), + + -- | Mark txs as submitted to the mempool and update shared state. + applySubmittedTxs :: Time + -> [TxKey] + -> [TxKey] + -> PeerTxLocalState tx + -> m (PeerTxLocalState tx), + + -- | Update the peer's rejection score based on the number of txs rejected + -- by the mempool, or late/missing delivieries. + countRejectedTxs :: Time + -> Int + -> m Double, + + -- | Resolve txids and advertised sizes for a batch of tx keys to request. 
+ resolveTxRequest :: PeerTxLocalState tx + -> [TxKey] + -> m (Map txid SizeInBytes), + -- | Resolve buffered tx bodies into full submission records. + resolveBufferedTxs :: PeerTxLocalState tx + -> [TxKey] + -> m [(TxKey, txid, tx)], + -- | Mark the given tx keys as entering mempool submission phase in shared + -- state. + startSubmittingTxs :: [TxKey] -> m (), + + -- | Add a delta to the V2 monotonic counters. + addCounters :: TxSubmissionCounters -> m () } - -data TxMempoolResult = TxAccepted | TxRejected - +-- -- | A bracket function which registers / de-registers a new peer in --- `SharedTxStateVar` and `PeerTxStateVar`s, which exposes `PeerTxStateAPI`. +-- `SharedTxStateVar`, which exposes `PeerTxStateAPI`. -- `PeerTxStateAPI` is only safe inside the `withPeer` scope. -- withPeer - :: forall tx peeraddr txid idx m err a. - ( MonadMask m - , MonadMVar m - , MonadSTM m - , MonadMonotonicTime m - , Ord txid - , Show txid - , Typeable txid - , Ord peeraddr - , Show peeraddr - ) - => Tracer m (TraceTxLogic peeraddr txid tx) - -> TxChannelsVar m peeraddr txid tx - -> TxMempoolSem m - -> TxDecisionPolicy - -> SharedTxStateVar m peeraddr txid tx - -> TxSubmissionMempoolReader txid tx idx m - -> TxSubmissionMempoolWriter txid tx idx m err - -> (tx -> SizeInBytes) - -> peeraddr - -- ^ new peer - -> (PeerTxAPI m txid tx -> m a) - -- ^ callback which gives access to `PeerTxStateAPI` - -> m a -withPeer tracer - channelsVar - (TxMempoolSem mempoolSem) - policy@TxDecisionPolicy { bufferedTxsMinLifetime } - sharedStateVar - TxSubmissionMempoolReader { mempoolGetSnapshot } - TxSubmissionMempoolWriter { mempoolAddTxs } - txSize - peeraddr io = + :: forall peeraddr txid tx idx m a. 
+ ( MonadMask m + , MonadTimer m + , Ord peeraddr + , Ord txid + ) + => TxDecisionPolicy + -> TxSubmissionMempoolReader txid tx idx m + -> SharedTxStateVar m peeraddr txid + -> TxSubmissionCountersVar m + -> peeraddr + -> (PeerTxAPI m txid tx -> m a) + -> m a +withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar countersVar peeraddr io = bracket - (do -- create a communication channel - !peerTxAPI <- - modifyMVar channelsVar - \ TxChannels { txChannelMap } -> do - chann <- newEmptyMVar - let (chann', txChannelMap') = - Map.alterF (\mbChann -> - let !chann'' = fromMaybe chann mbChann - in (chann'', Just chann'')) - peeraddr - txChannelMap - return - ( TxChannels { txChannelMap = txChannelMap' } - , PeerTxAPI { readTxDecision = takeMVar chann', - handleReceivedTxIds, - handleReceivedTxs, - submitTxToMempool } - ) - - atomically $ modifyTVar sharedStateVar registerPeer - return peerTxAPI - ) - -- the handler is a short blocking operation, thus we need to use - -- `uninterruptibleMask_` - (\_ -> uninterruptibleMask_ do - atomically $ modifyTVar sharedStateVar unregisterPeer - modifyMVar_ channelsVar - \ TxChannels { txChannelMap } -> - return TxChannels { txChannelMap = Map.delete peeraddr txChannelMap } + (do + now <- getMonotonicTime + atomically $ modifyTVar sharedStateVar (registerPeer now) + pure PeerTxAPI { + awaitSharedChange = awaitSharedChangeImp sharedStateVar peeraddr + , runNextPeerAction = runNextPeerActionImp policy sharedStateVar peeraddr + , runNextPeerActionPipelined = runNextPeerActionPipelinedImp policy sharedStateVar + peeraddr + , applyReceivedTxIds = applyReceivedTxIdsImp policy mempoolGetSnapshot sharedStateVar + peeraddr + , applyReceivedTxs = applyReceivedTxsImp policy mempoolGetSnapshot sharedStateVar + countersVar peeraddr + , applySubmittedTxs = applySubmittedTxsImp policy sharedStateVar peeraddr + , countRejectedTxs = countRejectedTxsImp policy sharedStateVar peeraddr + , resolveTxRequest = resolveTxRequestImp 
sharedStateVar + , resolveBufferedTxs = resolveBufferedTxsImp sharedStateVar + , startSubmittingTxs = atomically . modifyTVar sharedStateVar . + State.markSubmittingTxs peeraddr + , addCounters = \delta -> atomically $ modifyTVar countersVar (<> delta) + } ) + (\_ -> atomically $ modifyTVar sharedStateVar unregisterPeer) io where - registerPeer :: SharedTxState peeraddr txid tx - -> SharedTxState peeraddr txid tx - registerPeer st@SharedTxState { peerTxStates } = - st { peerTxStates = - Map.insert - peeraddr - PeerTxState { - availableTxIds = Map.empty, - requestedTxIdsInflight = 0, - requestedTxsInflightSize = 0, - requestedTxsInflight = Set.empty, - unacknowledgedTxIds = StrictSeq.empty, - unknownTxs = Set.empty, - score = 0, - scoreTs = Time 0, - downloadedTxs = Map.empty, - toMempoolTxs = Map.empty } - peerTxStates - } - - -- TODO: this function needs to be tested! - -- Issue: https://github.com/IntersectMBO/ouroboros-network/issues/5151 - unregisterPeer :: SharedTxState peeraddr txid tx - -> SharedTxState peeraddr txid tx - unregisterPeer st@SharedTxState { peerTxStates, - bufferedTxs, - referenceCounts, - inflightTxs, - inSubmissionToMempoolTxs } = - st { peerTxStates = peerTxStates', - bufferedTxs = bufferedTxs', - referenceCounts = referenceCounts', - inflightTxs = inflightTxs', - inSubmissionToMempoolTxs = inSubmissionToMempoolTxs' } + registerPeer :: Time -> SharedTxState peeraddr txid -> SharedTxState peeraddr txid + registerPeer now st@SharedTxState { sharedPeers, sharedGeneration } = + st { + sharedPeers = Map.insert peeraddr sharedPeerState sharedPeers, + sharedGeneration = sharedGeneration + 1 + } where - (PeerTxState { unacknowledgedTxIds, - requestedTxsInflight, - toMempoolTxs } - , peerTxStates') - = - Map.alterF - (\case - Nothing -> error ("TxSubmission.withPeer: invariant violation for peer " ++ show peeraddr) - Just a -> (a, Nothing)) - peeraddr - peerTxStates - - referenceCounts' = - Foldable.foldl' - (flip $ Map.update \cnt -> - if cnt > 1 - 
then Just $! pred cnt - else Nothing - ) - referenceCounts - unacknowledgedTxIds - - liveSet = Map.keysSet referenceCounts' - - bufferedTxs' = bufferedTxs - `Map.restrictKeys` - liveSet - - inflightTxs' = Foldable.foldl' purgeInflightTxs inflightTxs requestedTxsInflight - - -- When we unregister a peer, we need to subtract all txs in the - -- `toMempoolTxs`, as they will not be submitted to the mempool. - inSubmissionToMempoolTxs' = - Foldable.foldl' (flip $ Map.update \cnt -> - if cnt > 1 - then Just $! pred cnt - else Nothing - ) - inSubmissionToMempoolTxs - (Map.keysSet toMempoolTxs) - - purgeInflightTxs m txid = Map.alter fn txid m - where - fn (Just n) | n > 1 = Just $! pred n - fn _ = Nothing - - -- - -- PeerTxAPI - -- - - submitTxToMempool :: Tracer m (TraceTxSubmissionInbound txid tx) -> txid -> tx -> m () - submitTxToMempool txTracer txid tx = - bracket_ (atomically $ waitTSem mempoolSem) - (atomically $ signalTSem mempoolSem) - $ do - start <- getMonotonicTime - res <- addTx - end <- getMonotonicTime - atomically $ modifyTVar sharedStateVar (updateBufferedTx end res) - let duration = end `diffTime` start - case res of - TxAccepted -> traceWith txTracer (TraceTxInboundAddedToMempool [txid] duration) - TxRejected -> traceWith txTracer (TraceTxInboundRejectedFromMempool [txid] duration) - - where - -- add the tx to the mempool - addTx :: m TxMempoolResult - addTx = do - mpSnapshot <- atomically mempoolGetSnapshot - - -- Note that checking if the mempool contains a TX before - -- spending several ms attempting to add it to the pool has - -- been judged immoral. 
- if mempoolHasTx mpSnapshot txid - then do - !now <- getMonotonicTime - !s <- countRejectedTxs now 1 - traceWith txTracer $ TraceTxSubmissionProcessed ProcessedTxCount { - ptxcAccepted = 0 - , ptxcRejected = 1 - , ptxcScore = s - } - return TxRejected - else do - (acceptedTxs, _) <- mempoolAddTxs [tx] - end <- getMonotonicTime - case acceptedTxs of - [] -> do - !s <- countRejectedTxs end 1 - traceWith txTracer $ TraceTxSubmissionProcessed ProcessedTxCount { - ptxcAccepted = 0 - , ptxcRejected = 1 - , ptxcScore = s - } - return TxRejected - (_:_) -> do - !s <- countRejectedTxs end 0 - traceWith txTracer $ TraceTxSubmissionProcessed ProcessedTxCount { - ptxcAccepted = 1 - , ptxcRejected = 0 - , ptxcScore = s - } - return TxAccepted - - updateBufferedTx :: Time - -> TxMempoolResult - -> SharedTxState peeraddr txid tx - -> SharedTxState peeraddr txid tx - updateBufferedTx _ TxRejected st@SharedTxState { peerTxStates - , inSubmissionToMempoolTxs } = - st { peerTxStates = peerTxStates' - , inSubmissionToMempoolTxs = inSubmissionToMempoolTxs' } - where - inSubmissionToMempoolTxs' = - Map.update (\case 1 -> Nothing; n -> Just $! pred n) - txid inSubmissionToMempoolTxs - - peerTxStates' = Map.update fn peeraddr peerTxStates - where - fn ps = Just $! ps { toMempoolTxs = Map.delete txid (toMempoolTxs ps)} - - updateBufferedTx now TxAccepted - st@SharedTxState { peerTxStates - , bufferedTxs - , referenceCounts - , timedTxs - , inSubmissionToMempoolTxs } = - st { peerTxStates = peerTxStates' - , bufferedTxs = bufferedTxs' - , timedTxs = timedTxs' - , referenceCounts = referenceCounts' - , inSubmissionToMempoolTxs = inSubmissionToMempoolTxs' - } - where - inSubmissionToMempoolTxs' = - Map.update (\case 1 -> Nothing; n -> Just $! pred n) - txid inSubmissionToMempoolTxs - - timedTxs' = Map.alter fn (addTime bufferedTxsMinLifetime now) timedTxs - where - fn :: Maybe [txid] -> Maybe [txid] - fn Nothing = Just [txid] - fn (Just txids) = Just $! 
(txid:txids) - - referenceCounts' = Map.alter fn txid referenceCounts - where - fn :: Maybe Int -> Maybe Int - fn Nothing = Just 1 - fn (Just n) = Just $! succ n - - bufferedTxs' = Map.insert txid (Just tx) bufferedTxs - - peerTxStates' = Map.update fn peeraddr peerTxStates - where - fn ps = Just $! ps { toMempoolTxs = Map.delete txid (toMempoolTxs ps)} - - handleReceivedTxIds :: NumTxIdsToReq - -> StrictSeq txid - -> Map txid SizeInBytes - -> m () - handleReceivedTxIds numTxIdsToReq txidsSeq txidsMap = - receivedTxIds tracer - sharedStateVar - mempoolGetSnapshot - peeraddr - numTxIdsToReq - txidsSeq - txidsMap - - - handleReceivedTxs :: Map txid SizeInBytes - -- ^ requested txids with their announced size - -> Map txid tx - -- ^ received txs - -> m (Maybe TxSubmissionProtocolError) - handleReceivedTxs txids txs = - collectTxs tracer txSize sharedStateVar peeraddr txids txs - - -- Update `score` & `scoreTs` fields of `PeerTxState`, return the new - -- updated `score`. - -- - -- PRECONDITION: the `Double` argument is non-negative. 
- countRejectedTxs :: Time - -> Double - -> m Double - countRejectedTxs _ n | n < 0 = - error ("TxSubmission.countRejectedTxs: invariant violation for peer " ++ show peeraddr) - countRejectedTxs now n = atomically $ stateTVar sharedStateVar $ \st -> - let (result, peerTxStates') = Map.alterF fn peeraddr (peerTxStates st) - in (result, st { peerTxStates = peerTxStates' }) + sharedPeerState = SharedPeerState { + sharedPeerPhase = PeerIdle, + sharedPeerScore = emptyPeerScore now, + sharedPeerGeneration = 0, + sharedPeerRequestedTxBatches = 0, + sharedPeerRequestedTxsSize = 0 + } + + unregisterPeer :: SharedTxState peeraddr txid -> SharedTxState peeraddr txid + unregisterPeer st@SharedTxState { sharedPeers, sharedTxTable, sharedRetainedTxs, sharedTxIdToKey, sharedKeyToTxId, sharedGeneration } = + bumpIdlePeerGenerations peersToWake $ st { + sharedPeers = sharedPeers', + sharedTxTable = sharedTxTable', + sharedRetainedTxs = sharedRetainedTxs, + sharedTxIdToKey = Map.filter (\txKey -> IntSet.member (unTxKey txKey) liveKeys) sharedTxIdToKey, + sharedKeyToTxId = IntMap.restrictKeys sharedKeyToTxId liveKeys, + sharedGeneration = sharedGeneration + 1 + } where - fn :: Maybe (PeerTxState txid tx) -> (Double, Maybe (PeerTxState txid tx)) - fn Nothing = error ("TxSubmission.withPeer: invariant violation for peer " ++ show peeraddr) - fn (Just ps) = (score ps', Just $! ps') - where - ps' = updateRejects policy now n ps - - -updateRejects :: TxDecisionPolicy - -> Time - -> Double - -> PeerTxState txid tx - -> PeerTxState txid tx -updateRejects _ now 0 pts | score pts == 0 = pts {scoreTs = now} -updateRejects TxDecisionPolicy { scoreRate, scoreMax } now n - pts@PeerTxState { score, scoreTs } = - let duration = diffTime now scoreTs - !drain = realToFrac duration * scoreRate - !drained = max 0 $ score - drain in - pts { score = min scoreMax $ drained + n - , scoreTs = now - } - - -drainRejectionThread - :: forall m peeraddr txid tx. 
- ( MonadDelay m - , MonadSTM m - , MonadThread m - , Ord txid - ) - => Tracer m (TraceTxLogic peeraddr txid tx) - -> TxDecisionPolicy - -> SharedTxStateVar m peeraddr txid tx - -> m Void -drainRejectionThread tracer policy sharedStateVar = do - labelThisThread "tx-rejection-drain" - now <- getMonotonicTime - go $ addTime drainInterval now + sharedPeers' = Map.delete peeraddr sharedPeers + + (sharedTxTable', peersToWake) = + IntMap.foldlWithKey' scrubOne (IntMap.empty, Set.empty) sharedTxTable + liveKeys = IntMap.keysSet sharedTxTable' `IntSet.union` retainedKeysSet sharedRetainedTxs + + scrubOne (txTableAcc, wakeAcc) k txEntry = + let touched = txTouchesPeer txEntry + txEntry' = scrubTxEntry txEntry + in if txLive txEntry' + then ( IntMap.insert k txEntry' txTableAcc + , if touched + then Set.union wakeAcc (Set.delete peeraddr (Map.keysSet (txAdvertisers txEntry'))) + else wakeAcc + ) + else (txTableAcc, wakeAcc) + + scrubTxEntry txEntry@TxEntry { txLease, txAdvertisers, txAttempts } = + txEntry { + txLease = scrubLease txLease, + txAdvertisers = Map.delete peeraddr txAdvertisers, + txAttempts = Map.delete peeraddr txAttempts + } + + scrubLease (TxLeased owner leaseUntil) + | owner == peeraddr = TxClaimable + | otherwise = TxLeased owner leaseUntil + scrubLease TxClaimable = TxClaimable + + txTouchesPeer TxEntry { txLease, txAdvertisers, txAttempts } = + leaseOwnedByPeer txLease + || Map.member peeraddr txAdvertisers + || Map.member peeraddr txAttempts + + txLive TxEntry { txLease, txAdvertisers, txAttempts } = + leaseLive txLease + || not (Map.null txAdvertisers) + || not (Map.null txAttempts) + + leaseOwnedByPeer (TxLeased owner _) = owner == peeraddr + leaseOwnedByPeer TxClaimable = False + + leaseLive TxClaimable = False + leaseLive (TxLeased _ _) = True + +-- | Wait until either the peer's generation changes from the given +-- value or the optional timeout expires. 
+-- +-- Used by idle peers to avoid busy-waiting while still being woken when relevant cross-peer +-- state (such as lease expiries or new tx advertisements) changes. +awaitSharedChangeImp :: ( MonadTimer m + , Ord peeraddr ) + => SharedTxStateVar m peeraddr txid + -> peeraddr + -> Word64 + -> Maybe DiffTime + -> m () +awaitSharedChangeImp sharedStateVar peeraddr generation mDelay = + case mDelay of + Nothing -> + atomically $ do + sharedState <- readTVar sharedStateVar + let generation' = peerGenerationOf peeraddr sharedState + check (generation' /= generation) + Just delay -> do + delayVar <- registerDelay delay + atomically $ do + sharedState <- readTVar sharedStateVar + let generation' = peerGenerationOf peeraddr sharedState + expired <- Lazy.readTVar delayVar + check (generation' /= generation || expired) + +-- | Compute the next action for this peer in non-pipelined mode. +-- +-- Returns the selected 'PeerAction', an updated peer-local state, and applies +-- changes to shared state (such as lease/advertiser coordination). +-- Called from the main peer loop when not handling pipelined replies. +runNextPeerActionImp :: ( MonadSTM m + , Ord peeraddr + , Ord txid ) + => TxDecisionPolicy + -> SharedTxStateVar m peeraddr txid + -> peeraddr + -> Time + -> PeerTxLocalState tx + -> m (PeerAction, PeerTxLocalState tx) +runNextPeerActionImp policy sharedStateVar peeraddr now peerState = atomically $ do + sharedState <- readTVar sharedStateVar + let (peerAction, peerState', sharedState') = State.nextPeerAction now policy peeraddr + peerState sharedState + sharedState'' = updatePeerPhase peeraddr (peerPhaseForActionIdle peerAction) sharedState' + sharedState''' = updatePeerRequestedTxs policy peeraddr peerState' sharedState'' + writeTVar sharedStateVar sharedState''' + return (peerAction, peerState') + +-- | Compute the next action for this peer in pipelined mode. 
+-- +-- Similar to 'runNextPeerAction' but allows pipelined txid request messages where +-- both acknowledgments and requests can be sent together. Used when waiting for +-- pipelined protocol replies. +runNextPeerActionPipelinedImp :: ( MonadSTM m + , Ord peeraddr + , Ord txid ) + => TxDecisionPolicy + -> SharedTxStateVar m peeraddr txid + -> peeraddr + -> Time + -> PeerTxLocalState tx + -> m (PeerAction, PeerTxLocalState tx) +runNextPeerActionPipelinedImp policy sharedStateVar peeraddr now peerState = atomically $ do + sharedState <- readTVar sharedStateVar + let (peerAction, peerState', sharedState') = State.nextPeerActionPipelined now policy + peeraddr peerState sharedState + sharedState'' = updatePeerPhase peeraddr + (peerPhaseForActionPipelined peeraddr peerAction sharedState') + sharedState' + sharedState''' = updatePeerRequestedTxs policy peeraddr peerState' sharedState'' + writeTVar sharedStateVar sharedState''' + return (peerAction, peerState') + +-- | Process a batch of txids received from this peer. +-- +-- Interns new txids into the shared state, updates the peer's unacknowledged queue, +-- handles mempool fast-path for already-known txids, and sets up initial lease +-- ownership for first advertisers. Returns updated peer-local state. 
+applyReceivedTxIdsImp :: ( MonadSTM m + , Ord peeraddr + , Ord txid ) + => TxDecisionPolicy + -> STM m (MempoolSnapshot txid tx idx) + -> SharedTxStateVar m peeraddr txid + -> peeraddr + -> Time + -> NumTxIdsToReq + -> [(txid, SizeInBytes)] + -> PeerTxLocalState tx + -> m (PeerTxLocalState tx) +applyReceivedTxIdsImp policy mempoolGetSnapshot sharedStateVar peeraddr now txIdsToReq + txidsAndSizes peerState = atomically $ do + MempoolSnapshot { mempoolHasTx } <- mempoolGetSnapshot + sharedState <- readTVar sharedStateVar + let (peerState', sharedState') = State.handleReceivedTxIds mempoolHasTx now policy peeraddr + txIdsToReq txidsAndSizes peerState sharedState + sharedState'' = updatePeerRequestedTxs policy peeraddr peerState' sharedState' + writeTVar sharedStateVar sharedState'' + return peerState' + +-- | Process a batch of tx bodies received from this peer. +-- +-- Buffers the received bodies in peer-local state, updates shared advertiser tracking, +-- and handles omitted bodies by releasing ownership so other advertisers may claim them. +-- Returns the combined penalty count for bodies that were already resolved locally or +-- missing from the reply, together with the updated peer-local state. 
+applyReceivedTxsImp :: ( MonadSTM m + , Ord peeraddr + , Ord txid ) + => TxDecisionPolicy + -> STM m (MempoolSnapshot txid tx idx) + -> SharedTxStateVar m peeraddr txid + -> TxSubmissionCountersVar m + -> peeraddr + -> Time + -> [(txid, tx)] + -> PeerTxLocalState tx + -> m (Int, PeerTxLocalState tx) +applyReceivedTxsImp policy mempoolGetSnapshot sharedStateVar countersVar peeraddr now txs + peerState = atomically $ do + MempoolSnapshot { mempoolHasTx } <- mempoolGetSnapshot + sharedState <- readTVar sharedStateVar + let (omittedCount, lateCount, peerState', sharedState') = + State.handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState + sharedState'' = + updatePeerRequestedTxs policy peeraddr peerState' sharedState' + writeTVar sharedStateVar sharedState'' + modifyTVar countersVar (<> mempty { + txRepliesReceived = 1, + txsReceived = fromIntegral (length txs), + txsOmitted = fromIntegral omittedCount, + lateBodies = fromIntegral lateCount + }) + return (omittedCount + lateCount, peerState') + +-- | Mark txs as submitted to the mempool and update shared state. +-- +-- Takes the keys that were resolved (either already-in-mempool or successfully +-- submitted) and the keys that were rejected, updating lease ownership, advertiser +-- ack states, and peer rejection scores. +-- Returns updated peer-local state. 
+applySubmittedTxsImp :: ( MonadSTM m + , Ord peeraddr + , Ord txid ) + => TxDecisionPolicy + -> SharedTxStateVar m peeraddr txid + -> peeraddr + -> Time + -> [TxKey] + -> [TxKey] + -> PeerTxLocalState tx + -> m (PeerTxLocalState tx) +applySubmittedTxsImp policy sharedStateVar peeraddr now acceptedTxs rejectedTxs peerState = + atomically $ do + sharedState <- readTVar sharedStateVar + let (peerState', sharedState') = State.handleSubmittedTxs now policy peeraddr acceptedTxs + rejectedTxs peerState sharedState + sharedState'' = updatePeerRequestedTxs policy peeraddr peerState' sharedState' + writeTVar sharedStateVar sharedState'' + return peerState' + +-- | Update the peer's rejection score based on the number of txs rejected +-- by the mempool. +-- Returns the new score value for tracing. The score +-- decays over time and affects fallback peer selection when leases expire. +countRejectedTxsImp :: ( MonadSTM m + , Ord peeraddr) + => TxDecisionPolicy + -> SharedTxStateVar m peeraddr txid + -> peeraddr + -> Time + -> Int + -> m Double +countRejectedTxsImp TxDecisionPolicy { scoreRate, scoreMax } sharedStateVar peeraddr now + rejectedCount = atomically $ stateTVar sharedStateVar $ + updatePeerRejects (fromIntegral rejectedCount) where - drainInterval :: DiffTime - drainInterval = 7 - - go :: Time -> m Void - go !nextDrain = do - threadDelay 1 - - !now <- getMonotonicTime - st'' <- atomically $ do - st <- readTVar sharedStateVar - let ptss = if now > nextDrain then Map.map (updateRejects policy now 0) (peerTxStates st) - else peerTxStates st - st' = tickTimedTxs now st - { peerTxStates = ptss } - writeTVar sharedStateVar st' - return st' - traceWith tracer (TraceSharedTxState "drainRejectionThread" st'') - - if now > nextDrain - then go $ addTime drainInterval now - else go nextDrain - - -decisionLogicThread - :: forall m peeraddr txid tx. 
- ( MonadDelay m - , MonadMVar m - , MonadSTM m - , MonadMask m - , MonadFork m - , Ord peeraddr - , Ord txid - , Hashable peeraddr - ) - => Tracer m (TraceTxLogic peeraddr txid tx) - -> Tracer m TxSubmissionCounters - -> TxDecisionPolicy - -> TxChannelsVar m peeraddr txid tx - -> SharedTxStateVar m peeraddr txid tx - -> m Void -decisionLogicThread tracer counterTracer policy txChannelsVar sharedStateVar = do - labelThisThread "tx-decision" - go + updatePeerRejects n sharedState = + case Map.lookup peeraddr (sharedPeers sharedState) of + Nothing -> (0, sharedState) -- TODO this is an invariant violation + Just sharedPeerState@SharedPeerState { sharedPeerScore } -> + let sharedPeerScore' = updateRejects n sharedPeerScore + sharedPeerState' = sharedPeerState { sharedPeerScore = sharedPeerScore' } + sharedState' = sharedState { + sharedPeers = Map.insert peeraddr sharedPeerState' (sharedPeers sharedState), + sharedGeneration = sharedGeneration sharedState + 1 + } in + (peerScoreValue sharedPeerScore', sharedState') + + updateRejects 0 ps@PeerScore { peerScoreValue = 0 } = ps { peerScoreTs = now } + updateRejects n ps@PeerScore { peerScoreValue, peerScoreTs } = + let duration = diffTime now peerScoreTs + !drain = realToFrac duration * scoreRate + !drained = max 0 (peerScoreValue - drain) in + ps { peerScoreValue = min scoreMax (drained + n) + , peerScoreTs = now } + +-- | Resolve txids and advertised sizes for a batch of tx keys to request. +-- +-- Looks up the real txid and size from peer-local state for building the +-- protocol message. Used before sending 'MsgRequestTxs'. 
+resolveTxRequestImp :: ( MonadSTM m + , Ord txid ) + => SharedTxStateVar m peeraddr txid + -> PeerTxLocalState tx + -> [TxKey] + -> m (Map txid SizeInBytes) +resolveTxRequestImp sharedStateVar peerState txKeys = atomically $ do + sharedState <- readTVar sharedStateVar + return $ Map.fromList (fmap (resolveOne sharedState) txKeys) + where + resolveOne sharedState key@(TxKey k) = + ( resolveTxKey sharedState key + , case IntMap.lookup k (peerAvailableTxIds peerState) of + Just txSize -> txSize + Nothing -> error "TxSubmission.V2.resolveTxRequestImp: missing tx size" + ) + +-- | Resolve buffered tx bodies into full submission records. +-- +-- Takes tx keys that have been downloaded and buffered locally, looks up their txids and +-- body values from peer-local state, and returns triples ready for mempool submission. +-- Used when submitting txs after body collection. +resolveBufferedTxsImp :: ( MonadSTM m + ) + => SharedTxStateVar m peeraddr txid + -> PeerTxLocalState tx + -> [TxKey] + -> m [(TxKey, txid, tx)] +resolveBufferedTxsImp sharedStateVar peerState txKeys = atomically $ do + sharedState <- readTVar sharedStateVar + return $ fmap (resolveOne sharedState) txKeys where - go :: m Void - go = do - -- We rate limit the decision making process, it could overwhelm the CPU - -- if there are too many inbound connections. 
- threadDelay _DECISION_LOOP_DELAY - - (decisions, st) <- atomically do - sharedTxState <- readTVar sharedStateVar - let activePeers = filterActivePeers policy sharedTxState - - -- block until at least one peer is active - check (not (Map.null activePeers)) - - let (sharedState, decisions) = makeDecisions policy sharedTxState activePeers - writeTVar sharedStateVar sharedState - return (decisions, sharedState) - traceWith tracer (TraceSharedTxState "decisionLogicThread" st) - traceWith tracer (TraceTxDecisions decisions) - TxChannels { txChannelMap } <- readMVar txChannelsVar - traverse_ - (\(mvar, d) -> - modifyMVarWithDefault_ mvar d (\d' -> - let left = Set.fromList . fmap fst $ listOfTxsToMempool (txdTxsToMempool d) - right = Set.fromList . fmap fst $ listOfTxsToMempool (txdTxsToMempool d') - shared = Set.intersection left right - in assert (Set.null shared) $ pure (d' <> d))) - (Map.intersectionWith (,) - txChannelMap - decisions) - traceWith counterTracer (mkTxSubmissionCounters st) - go - - -- Variant of modifyMVar_ that puts a default value if the MVar is empty. - modifyMVarWithDefault_ :: StrictMVar m a -> a -> (a -> m a) -> m () - modifyMVarWithDefault_ m d io = - mask $ \restore -> do - mbA <- tryTakeMVar m - case mbA of - Just a -> do - a' <- restore (io a) `onException` putMVar m a - putMVar m a' - Nothing -> putMVar m d - - --- | Run `decisionLogicThread` and `drainRejectionThread`. + resolveOne sharedState key@(TxKey k) = + ( key + , resolveTxKey sharedState key + , case IntMap.lookup k (peerDownloadedTxs peerState) of + Just tx -> tx + Nothing -> error "TxSubmission.V2.resolveBufferedTxsImp: missing buffered tx" + ) + +-- | Update a peer's phase. +-- +-- A phase change always bumps the shared generation. In addition: -- -decisionLogicThreads - :: forall m peeraddr txid tx. 
- ( MonadDelay m - , MonadMVar m - , MonadMask m - , MonadAsync m - , MonadFork m - , Ord peeraddr - , Ord txid - , Hashable peeraddr - ) - => Tracer m (TraceTxLogic peeraddr txid tx) - -> Tracer m TxSubmissionCounters - -> TxDecisionPolicy - -> TxChannelsVar m peeraddr txid tx - -> SharedTxStateVar m peeraddr txid tx - -> m Void -decisionLogicThreads tracer counterTracer policy txChannelsVar sharedStateVar = - uncurry (<>) <$> - drainRejectionThread tracer policy sharedStateVar - `concurrently` - decisionLogicThread tracer counterTracer policy txChannelsVar sharedStateVar - - --- `5ms` delay -_DECISION_LOOP_DELAY :: DiffTime -_DECISION_LOOP_DELAY = 0.005 +-- * When a peer becomes 'PeerIdle', bump that peer's own generation so a +-- 'PeerDoNothing' action computed before the phase change does not put that +-- same peer thread to sleep on a stale generation. This makes its next +-- 'awaitSharedChange' return immediately and re-run scheduling as an idle +-- claimant. +-- * When a peer leaves 'PeerIdle', bump the generations of other advertisers +-- for txs advertised by that peer. Claim-owner selection only considers idle +-- peers, so removing one idle advertiser can change which remaining idle +-- peer should wake and claim a tx. +updatePeerPhase + :: Ord peeraddr + => peeraddr + -> PeerPhase + -> SharedTxState peeraddr txid + -> SharedTxState peeraddr txid +updatePeerPhase peeraddr peerPhaseNew st@SharedTxState { sharedPeers, sharedGeneration } = + case Map.lookup peeraddr sharedPeers of + Just sharedPeerState -> + let peerPhaseOld = sharedPeerPhase sharedPeerState in + if peerPhaseOld /= peerPhaseNew + then + let st' = st { sharedPeers = Map.insert peeraddr + (sharedPeerState { sharedPeerPhase = peerPhaseNew }) sharedPeers + , sharedGeneration = sharedGeneration + 1 } in + bumpIdlePeerGenerations (phaseWakePeers peerPhaseOld) st' + else st + _ -> st -- TODO error? 
+ where + phaseWakePeers peerPhaseOld + | peerPhaseOld /= PeerIdle + , peerPhaseNew == PeerIdle = Set.singleton peeraddr + | peerPhaseOld == PeerIdle + , peerPhaseNew /= PeerIdle = advertisersForPeerTxsExcept peeraddr st + | otherwise = Set.empty + + +-- | Update the peer's shared TX state so that it is in sync with its local state. +updatePeerRequestedTxs + :: Ord peeraddr + => TxDecisionPolicy + -> peeraddr + -> PeerTxLocalState tx + -> SharedTxState peeraddr txid + -> SharedTxState peeraddr txid +updatePeerRequestedTxs _policy peeraddr peerState + st@SharedTxState { sharedPeers, sharedGeneration } = + case Map.lookup peeraddr sharedPeers of + Just sharedPeerState -> + if sharedPeerRequestedTxBatches sharedPeerState /= requestedTxBatches + || sharedPeerRequestedTxsSize sharedPeerState /= requestedTxsSize + then + let sharedPeerState' = sharedPeerState { + sharedPeerRequestedTxBatches = requestedTxBatches + , sharedPeerRequestedTxsSize = requestedTxsSize } + sharedPeers' = Map.insert peeraddr sharedPeerState' sharedPeers in + st { sharedPeers = sharedPeers' + , sharedGeneration = sharedGeneration + 1 } + else st + _ -> st -- TODO: error? 
+ where + requestedTxBatches = StrictSeq.length (peerRequestedTxBatches peerState) + requestedTxsSize = peerRequestedTxsSize peerState + +advertisersForPeerTxsExcept + :: Ord peeraddr + => peeraddr + -> SharedTxState peeraddr txid + -> Set.Set peeraddr +advertisersForPeerTxsExcept peeraddr SharedTxState { sharedTxTable } = + IntMap.foldl' collect Set.empty sharedTxTable + where + collect peers TxEntry { txAdvertisers } + | Map.member peeraddr txAdvertisers = + Set.union peers (Set.delete peeraddr (Map.keysSet txAdvertisers)) + | otherwise = + peers + +peerPhaseForActionIdle :: PeerAction -> PeerPhase +peerPhaseForActionIdle peerAction = + case peerAction of + PeerDoNothing {} -> PeerIdle + PeerSubmitTxs {} -> PeerSubmittingToMempool + PeerRequestTxs {} -> PeerWaitingTxs + PeerRequestTxIds {} -> PeerWaitingTxIds + +peerPhaseForActionPipelined + :: Ord peeraddr + => peeraddr + -> PeerAction + -> SharedTxState peeraddr txid + -> PeerPhase +peerPhaseForActionPipelined peeraddr peerAction sharedState = + case peerAction of + PeerDoNothing {} -> peerPhaseOf peeraddr sharedState + PeerSubmitTxs {} -> PeerSubmittingToMempool + PeerRequestTxs {} -> PeerWaitingTxs + PeerRequestTxIds {} -> PeerWaitingTxIds + where + peerPhaseOf peer st = + maybe PeerIdle sharedPeerPhase (Map.lookup peer (sharedPeers st)) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index d80c3910f69..86bd680c332 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -1,580 +1,1135 @@ -{-# LANGUAGE BlockArguments #-} -{-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} -{-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} module Ouroboros.Network.TxSubmission.Inbound.V2.State - ( -- * Core API - SharedTxState (..) - , PeerTxState (..) 
- , SharedTxStateVar - , newSharedTxStateVar - , receivedTxIds - , collectTxs - , acknowledgeTxIds - , splitAcknowledgedTxIds - , tickTimedTxs - , const_MAX_TX_SIZE_DISCREPANCY - -- * Internals, only exported for testing purposes: - , RefCountDiff (..) - , updateRefCounts - , receivedTxIdsImpl - , collectTxsImpl + ( handleReceivedTxIds + , handleReceivedTxs + , handleSubmittedTxs + , markSubmittingTxs + , nextPeerAction + , nextPeerActionPipelined ) where -import Control.Concurrent.Class.MonadSTM.Strict import Control.Exception (assert) -import Control.Monad.Class.MonadTime.SI -import Control.Tracer (Tracer, traceWith) - -import Data.Foldable (fold, toList) -import Data.Foldable qualified as Foldable -import Data.Functor (($>)) -import Data.Map.Merge.Strict qualified as Map -import Data.Map.Strict (Map) +import Control.Monad.Class.MonadTime.SI (DiffTime, Time, addTime, diffTime) +import Data.Foldable (foldl', toList) +import Data.IntMap.Strict qualified as IntMap +import Data.IntSet qualified as IntSet +import Data.List (sort) import Data.Map.Strict qualified as Map -import Data.Maybe (fromJust) -import Data.Sequence.Strict (StrictSeq) import Data.Sequence.Strict qualified as StrictSeq import Data.Set qualified as Set -import Data.Typeable (Typeable) -import GHC.Stack (HasCallStack) -import System.Random (StdGen) +import Data.Word (Word64) -import Ouroboros.Network.Protocol.TxSubmission2.Type (NumTxIdsToAck (..)) -import Ouroboros.Network.SizeInBytes (SizeInBytes (..)) +import Ouroboros.Network.Protocol.TxSubmission2.Type (NumTxIdsToAck, + SizeInBytes) import Ouroboros.Network.TxSubmission.Inbound.V2.Policy import Ouroboros.Network.TxSubmission.Inbound.V2.Types -import Ouroboros.Network.TxSubmission.Mempool.Reader (MempoolSnapshot (..)) +data TxIdRequestMode = AllowAnyTxIdRequests | AllowPipelinedTxIdRequests + deriving Eq +-- | Precomputed context for selecting the next action for one peer. 
-- --- Pure public API --- +data PeerActionContext peeraddr txid tx = PeerActionContext { + -- | Current time used for lease expiry and score decay decisions. + pacNow :: !Time, + -- | Decision policy that governs request, retry, and scoring limits. + pacPolicy :: !TxDecisionPolicy, + -- | Address of the peer whose next action is being chosen. + pacPeerAddr :: !peeraddr, + -- | Current peer-local state after local pruning has been applied. + pacPeerState :: !(PeerTxLocalState tx), + -- | Shared tx-submission state after shared pruning has been applied. + pacSharedState :: !(SharedTxState peeraddr txid), + -- | Decayed scores for peers that are currently idle and eligible to claim work. + pacIdlePeerScores :: !(Map.Map peeraddr Double) + } -acknowledgeTxIds - :: forall peeraddr tx txid. - Ord txid - => TxDecisionPolicy - -> SharedTxState peeraddr txid tx - -> PeerTxState txid tx - -> ( NumTxIdsToAck - , NumTxIdsToReq - , TxsToMempool txid tx - , RefCountDiff txid - , PeerTxState txid tx - ) - -- ^ number of txid to acknowledge, requests, txs which we can submit to the - -- mempool, txids to acknowledge with multiplicities, updated PeerTxState. -{-# INLINE acknowledgeTxIds #-} - -acknowledgeTxIds - policy - sharedTxState - ps@PeerTxState { availableTxIds, - unknownTxs, - requestedTxIdsInflight, - downloadedTxs, - score, - toMempoolTxs - } - = - -- We can only acknowledge txids when we can request new ones, since - -- a `MsgRequestTxIds` for 0 txids is a protocol error. 
- if txIdsToRequest > 0 - then - ( txIdsToAcknowledge - , txIdsToRequest - , TxsToMempool txsToMempool - , refCountDiff - , ps { unacknowledgedTxIds = unacknowledgedTxIds', - availableTxIds = availableTxIds', - unknownTxs = unknownTxs', - requestedTxIdsInflight = requestedTxIdsInflight - + txIdsToRequest, - downloadedTxs = downloadedTxs', - score = score', - toMempoolTxs = toMempoolTxs' } - ) - else - ( 0 - , 0 - , TxsToMempool txsToMempool - , RefCountDiff Map.empty - , ps { toMempoolTxs = toMempoolTxs' } - ) +data PeerActionChoice peeraddr = + ChooseSubmit ![TxKey] + | ChooseRequestTxs ![TxKey] !SizeInBytes !(IntMap.IntMap (TxEntry peeraddr)) + | ChooseRequestTxIds ![TxKey] !NumTxIdsToAck !NumTxIdsToReq !(StrictSeq.StrictSeq TxKey) + | ChooseDoNothing !Word64 !(Maybe DiffTime) + +-- | Build a precomputed context for selecting the next action for a peer. +-- +-- +mkPeerActionContext :: Ord txid + => Time + -> TxDecisionPolicy + -> peeraddr + -> PeerTxLocalState tx + -> SharedTxState peeraddr txid + -> PeerActionContext peeraddr txid tx +mkPeerActionContext now policy peeraddr peerState sharedState = + PeerActionContext { + pacNow = now, + pacPolicy = policy, + pacPeerAddr = peeraddr, + pacPeerState = peerState', + pacSharedState = sharedState', + pacIdlePeerScores = idlePeerScores + } where - -- Split `unacknowledgedTxIds'` into the longest prefix of `txid`s which - -- can be acknowledged and the unacknowledged `txid`s. - (txIdsToRequest, acknowledgedTxIds, unacknowledgedTxIds') - = splitAcknowledgedTxIds policy sharedTxState ps - - txsToMempool = [ (txid, downloadedTxs Map.! txid) - | txid <- toList toMempoolTxIds - , txid `Map.notMember` bufferedTxs sharedTxState - -- without the guard below we could potentially enqueue - -- the same tx into the mempool multiple times over - -- several decision loop iterations before the tx - -- is finally in the mempool, or rejected. 
- , txid `Map.notMember` toMempoolTxs - ] - -- Select downloaded txs from the prefix of `acknowledgedTxIds`, ignoring - -- unknown and buffered txs. - toMempoolTxIds = - StrictSeq.filter (`Map.member` downloadedTxs) acknowledgedTxIds - - txsToMempoolMap = Map.fromList txsToMempool - - toMempoolTxs' = toMempoolTxs <> txsToMempoolMap - - (downloadedTxs', ackedDownloadedTxs) = - Map.partitionWithKey (\txid _ -> txid `Set.member` liveSet) downloadedTxs - - -- latexTxs: transactions which were downloaded by another peer before we - -- downloaded them; it relies on that `txToMempool` filters out - -- `bufferedTxs`. - lateTxs = - Map.filterWithKey (\txid _ -> txid `Map.member` bufferedTxs sharedTxState) ackedDownloadedTxs - - score' = score + fromIntegral (Map.size lateTxs) - - -- the set of live `txids` - liveSet = Set.fromList (toList unacknowledgedTxIds') - - availableTxIds' = availableTxIds - `Map.restrictKeys` - liveSet - - -- We remove all acknowledged `txid`s which are not in - -- `unacknowledgedTxIds''`, but also return the unknown set before any - -- modifications (which is used to compute `unacknowledgedTxIds''` - -- above). - unknownTxs' = unknownTxs `Set.intersection` liveSet - - refCountDiff = RefCountDiff - $ foldr (Map.alter fn) - Map.empty acknowledgedTxIds + -- Remove expireds TX keys from the shared state + sharedState' = + let expiredRetainedKeys = retainedExpiredKeys now (sharedRetainedTxs sharedState) in + dropTxKeys expiredRetainedKeys sharedState + + -- Remove downloaded tx bodies that are no longer in the shared state. + peerState' = + peerState { + peerDownloadedTxs = IntMap.restrictKeys (peerDownloadedTxs peerState) (IntMap.keysSet (sharedTxTable sharedState')) + } + + idlePeerScores = + Map.mapMaybe toIdleScore (sharedPeers sharedState') where - fn :: Maybe Int -> Maybe Int - fn Nothing = Just 1 - fn (Just n) = Just $! 
n + 1 + toIdleScore SharedPeerState { sharedPeerPhase, sharedPeerScore } + | sharedPeerPhase == PeerIdle = Just (currentPeerScore policy now sharedPeerScore) + | otherwise = Nothing - txIdsToAcknowledge :: NumTxIdsToAck - txIdsToAcknowledge = fromIntegral $ StrictSeq.length acknowledgedTxIds +-- | Compute the next peer-local action. +nextPeerAction :: (Ord peeraddr, Ord txid) + => Time + -> TxDecisionPolicy + -> peeraddr + -> PeerTxLocalState tx + -> SharedTxState peeraddr txid + -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) +nextPeerAction = nextPeerActionWithMode AllowAnyTxIdRequests +-- | Pipelined version of nextPeerAction +nextPeerActionPipelined :: (Ord peeraddr, Ord txid) + => Time + -> TxDecisionPolicy + -> peeraddr + -> PeerTxLocalState tx + -> SharedTxState peeraddr txid + -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) +nextPeerActionPipelined = nextPeerActionWithMode AllowPipelinedTxIdRequests --- | Split unacknowledged txids into acknowledged and unacknowledged parts, also --- return number of txids which can be requested. +-- | V2 peer-thread scheduler -- -splitAcknowledgedTxIds - :: Ord txid - => TxDecisionPolicy - -> SharedTxState peer txid tx - -> PeerTxState txid tx - -> (NumTxIdsToReq, StrictSeq.StrictSeq txid, StrictSeq.StrictSeq txid) - -- ^ number of txids to request, acknowledged txids, unacknowledged txids -splitAcknowledgedTxIds - TxDecisionPolicy { - maxUnacknowledgedTxIds, - maxNumTxIdsToRequest - } - SharedTxState { - bufferedTxs - } - PeerTxState { - unacknowledgedTxIds, - unknownTxs, - downloadedTxs, - requestedTxsInflight, - requestedTxIdsInflight - } - = - (txIdsToRequest, acknowledgedTxIds', unacknowledgedTxIds') +-- nextPeerActionWithMode handles body requests for txs this peer may currently +-- fetch, tx submission for bodies buffered locally by this peer, and txid ack/request +-- messages. 
+nextPeerActionWithMode :: (Ord peeraddr, Ord txid) + => TxIdRequestMode + -> Time + -> TxDecisionPolicy + -> peeraddr + -> PeerTxLocalState tx + -> SharedTxState peeraddr txid + -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) +nextPeerActionWithMode txIdRequestMode now policy peeraddr peerState sharedState = + applyPeerActionChoice ctx (pickPeerActionChoice txIdRequestMode ctx) where - (acknowledgedTxIds', unacknowledgedTxIds') - = StrictSeq.spanl (\txid -> - txid `Set.notMember` requestedTxsInflight - && ( - txid `Map.member` downloadedTxs - || txid `Set.member` unknownTxs - || txid `Map.member` bufferedTxs - ) - ) - unacknowledgedTxIds - - numOfUnacked = StrictSeq.length unacknowledgedTxIds - numOfAcked = StrictSeq.length acknowledgedTxIds' - unackedAndRequested = fromIntegral numOfUnacked + requestedTxIdsInflight - - txIdsToRequest = - assert (unackedAndRequested <= maxUnacknowledgedTxIds) $ - assert (requestedTxIdsInflight <= maxNumTxIdsToRequest) $ - (maxUnacknowledgedTxIds - unackedAndRequested + fromIntegral numOfAcked) - `min` - (maxNumTxIdsToRequest - requestedTxIdsInflight) - - --- | `RefCountDiff` represents a map of `txid` which can be acknowledged --- together with their multiplicities. + ctx = mkPeerActionContext now policy peeraddr peerState sharedState + +-- | Pick which action to perform next. -- -newtype RefCountDiff txid = RefCountDiff { - txIdsToAck :: Map txid Int +pickPeerActionChoice :: Ord peeraddr + => TxIdRequestMode + -> PeerActionContext peeraddr txid tx + -> PeerActionChoice peeraddr +pickPeerActionChoice txIdRequestMode ctx + -- Pick TXs to submit to the mempool + | Just txsToSubmit <- pickSubmitAction ctx = + ChooseSubmit txsToSubmit + -- Pick TXs to fetch + | Just (txsToRequest, txsToRequestSize, txTable') <- pickRequestTxsAction ctx = + ChooseRequestTxs txsToRequest txsToRequestSize txTable' + -- Pick TXids to ack and/or request more TXids. 
+ | Just (acknowledgedTxIds, txIdsToAcknowledge, txIdsToRequest, unacknowledgedTxIds') <- + pickRequestTxIdsAction txIdRequestMode ctx = + ChooseRequestTxIds acknowledgedTxIds txIdsToAcknowledge txIdsToRequest unacknowledgedTxIds' + -- Do nothing + | otherwise = + ChooseDoNothing (peerGenerationOf (pacPeerAddr ctx) (pacSharedState ctx)) (nextWakeDelay ctx) + +-- | Execute a chosen peer action and compute resulting state updates +applyPeerActionChoice :: (Ord peeraddr, Ord txid) + => PeerActionContext peeraddr txid tx + -> PeerActionChoice peeraddr + -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) +applyPeerActionChoice ctx choice = + case choice of + ChooseSubmit txsToSubmit -> + applySubmitChoice ctx txsToSubmit + ChooseRequestTxs txsToRequest txsToRequestSize txTable' -> + applyRequestTxsChoice ctx txsToRequest txsToRequestSize txTable' + ChooseRequestTxIds acknowledgedTxIds txIdsToAcknowledge txIdsToRequest + unacknowledgedTxIds' -> + applyRequestTxIdsChoice ctx acknowledgedTxIds txIdsToAcknowledge txIdsToRequest + unacknowledgedTxIds' + ChooseDoNothing generation wakeDelay -> + applyDoNothingChoice ctx generation wakeDelay + +-- | Construct a 'PeerSubmitTxs' action for buffered transactions. +applySubmitChoice :: PeerActionContext peeraddr txid tx + -> [TxKey] + -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) +applySubmitChoice ctx txsToSubmit = + ( PeerSubmitTxs txsToSubmit + , pacPeerState ctx + , pacSharedState ctx + ) + +-- | Construct a 'PeerRequestTxs' action and update local and shared tx state. 
+applyRequestTxsChoice :: Ord peeraddr + => PeerActionContext peeraddr txid tx + -> [TxKey] + -> SizeInBytes + -> IntMap.IntMap (TxEntry peeraddr) + -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) +applyRequestTxsChoice ctx txsToRequest txsToRequestSize txTable = + ( PeerRequestTxs txsToRequest + , peerState'' + , sharedState'' + ) + where + peerState'' = + (pacPeerState ctx) { + peerRequestedTxs = + foldl' (flip IntSet.insert) (peerRequestedTxs (pacPeerState ctx)) (unTxKey <$> txsToRequest), + peerRequestedTxBatches = + peerRequestedTxBatches (pacPeerState ctx) StrictSeq.|> RequestedTxBatch { + requestedTxBatchSet = IntSet.fromList (unTxKey <$> txsToRequest), + requestedTxBatchSize = txsToRequestSize + }, + peerRequestedTxsSize = peerRequestedTxsSize (pacPeerState ctx) + txsToRequestSize + } + sharedState'' = + bumpIdlePeerGenerations + (advertisersForKeysExcept (pacPeerAddr ctx) txTable txsToRequest) + ((pacSharedState ctx) { + sharedTxTable = txTable, + sharedGeneration = sharedGeneration (pacSharedState ctx) + 1 + }) + +-- | Construct a 'PeerRequestTxIds' action and update local and shared txid state. 
+applyRequestTxIdsChoice + :: (Ord peeraddr, Ord txid) + => PeerActionContext peeraddr txid tx + -> [TxKey] + -> NumTxIdsToAck + -> NumTxIdsToReq + -> StrictSeq.StrictSeq TxKey + -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) +applyRequestTxIdsChoice ctx acknowledgedTxIds txIdsToAcknowledge txIdsToRequest unacknowledgedTxIds' = + ( PeerRequestTxIds txIdsToAcknowledge txIdsToRequest + , peerState'' + , sharedState'' + ) + where + peerState'' = + (pacPeerState ctx) { + peerAvailableTxIds = + foldl' (flip IntMap.delete) (peerAvailableTxIds (pacPeerState ctx)) (unTxKey <$> acknowledgedTxIds), + peerUnacknowledgedTxIds = unacknowledgedTxIds', + peerRequestedTxIds = peerRequestedTxIds (pacPeerState ctx) + txIdsToRequest + } + sharedState'' = + acknowledgeTxIds (pacPeerAddr ctx) acknowledgedTxIds (pacSharedState ctx) + +-- | Construct a 'PeerDoNothing' action. +applyDoNothingChoice + :: PeerActionContext peeraddr txid tx + -> Word64 + -> Maybe DiffTime + -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) +applyDoNothingChoice ctx generation wakeDelay = + ( PeerDoNothing generation wakeDelay + , pacPeerState ctx + , pacSharedState ctx + ) + +-- | Select downloaded transactions that this peer may submit to the mempool. +pickSubmitAction + :: Ord peeraddr + => PeerActionContext peeraddr txid tx + -> Maybe [TxKey] +pickSubmitAction PeerActionContext { pacPeerAddr, pacPeerState, pacSharedState } = + let txsToSubmit = pickBufferedTxsToSubmit in + if null txsToSubmit + then Nothing + else Just txsToSubmit + where + + -- Filters the unacknowledged txid queue for bodies buffered by this peer + -- that are not currently being submitted by another advertiser. + -- Returns the list of tx keys ready for immediate submission in the order they + -- were originally advertised by the peer. 
+ pickBufferedTxsToSubmit = + [ txKey + | txKey@(TxKey k) <- toList (peerUnacknowledgedTxIds pacPeerState) + , IntMap.member k (peerDownloadedTxs pacPeerState) + , Just txEntry <- [IntMap.lookup k (sharedTxTable pacSharedState)] + , txBufferedByPeer pacPeerAddr txEntry + , not (txSubmittingByOther pacPeerAddr txEntry) + ] + +-- | Select transactions to request from the peer, if within policy limits. +-- +-- Returns a triple of: +-- Tx keys to request (in ascending key order for deterministic selection) +-- Total serialized size of the requested txs +-- Updated shared state with new lease ownership for selected txs +pickRequestTxsAction :: Ord peeraddr + => PeerActionContext peeraddr txid tx + -> Maybe ([TxKey], SizeInBytes, IntMap.IntMap (TxEntry peeraddr)) +pickRequestTxsAction ctx@PeerActionContext { pacNow, pacPolicy, pacPeerState, pacSharedState } = + let (txsToRequest, txsToRequestSize, sharedState') = pickTxsToRequest in + if null txsToRequest + then Nothing + else Just (txsToRequest, txsToRequestSize, sharedState') + where + + -- Picks txs from the peer's available set that are not yet requested or + -- downloaded, assigning leases with expiry timestamps. + -- Respects 'maxOutstandingTxBatchesPerPeer' and 'txsSizeInflightPerPeer' policy + -- constraints. + pickTxsToRequest = + if StrictSeq.length (peerRequestedTxBatches pacPeerState) >= + maxOutstandingTxBatchesPerPeer pacPolicy + then ([], 0, sharedTxTable pacSharedState) + else go [] 0 (sharedTxTable pacSharedState) candidates + where + -- Remaining bytes available for requesting new tx, based on the + -- per-peer inflight size limit. + sizeBudget = + if peerRequestedTxsSize pacPeerState >= txsSizeInflightPerPeer pacPolicy + then 0 + else txsSizeInflightPerPeer pacPolicy - peerRequestedTxsSize pacPeerState + + leaseUntil = addTime (interTxSpace pacPolicy) pacNow + + -- We pick which TXs to download based on TxKey in ascending order. 
+ -- This makes it likely (but not guaranteed) that we end up downloading + -- TXs in the order the peer presented them to us. + candidates = + [ (k, txSize) + | (k, txSize) <- IntMap.toAscList (peerAvailableTxIds pacPeerState) + , IntSet.notMember k (peerRequestedTxs pacPeerState) + , IntMap.notMember k (peerDownloadedTxs pacPeerState) + ] + + -- Select transactions to request by iterating through candidates in ascending + -- key order until the size budget is consumed. + go selectedRev selectedSize txTable [] = (reverse selectedRev, selectedSize, txTable) + go selectedRev selectedSize txTable ((k, txSize) : rest) = + if exceedsBudget selectedSize txSize + then (reverse selectedRev, selectedSize, txTable) + else + case IntMap.lookup k txTable of + Just txEntry -> + if txSelectable ctx txEntry + then + go (TxKey k : selectedRev) + (selectedSize + txSize) + (IntMap.insert k (claimTx (pacPeerAddr ctx) leaseUntil txEntry) + txTable) + rest + else go selectedRev selectedSize txTable rest + Nothing -> go selectedRev selectedSize txTable rest + + exceedsBudget selectedSize txSize + | selectedSize + txSize <= sizeBudget = False + | selectedSize /= 0 = True + -- The inflight size limit is soft by up to one tx size, so a peer with + -- spare capacity may still request its first tx in a batch even when + -- that single tx exceeds the remaining byte budget. + | otherwise = peerRequestedTxsSize pacPeerState >= txsSizeInflightPerPeer pacPolicy + +-- | Determine txid acknowledgment and request counts, if any work is available. 
+pickRequestTxIdsAction :: Ord peeraddr + => TxIdRequestMode + -> PeerActionContext peeraddr txid tx + -> Maybe ([TxKey], NumTxIdsToAck, NumTxIdsToReq, StrictSeq.StrictSeq TxKey) +pickRequestTxIdsAction txIdRequestMode ctx@PeerActionContext { pacPolicy, pacPeerState } + | txIdsToAcknowledge <= 0 && txIdsToRequest <= 0 = Nothing + | txIdRequestMode == AllowPipelinedTxIdRequests + , txIdsToAcknowledge <= 0 || txIdsToRequest <= 0 = Nothing + | otherwise = Just (acknowledgedTxIds, txIdsToAcknowledge, txIdsToRequest, unacknowledgedTxIds') + where + + -- Split the unacknowledged txid queue into acknowledged and remaining portions. + -- + -- acknowledgedTxIds is the longest prefix of "ackable" txids. + -- unacknowledgedTxIds is the remaining txids + (acknowledgedTxIds, txIdsToAcknowledge, txIdsToRequest, unacknowledgedTxIds') = + ( toList acknowledgedTxIdsSeq + , fromIntegral numOfAcked + , txIdsToRequest' + , unacknowledgedTxIds + ) + where + ackablePrefix = StrictSeq.takeWhileL (txIdAckable ctx) (peerUnacknowledgedTxIds + pacPeerState) + + numOfUnacked = StrictSeq.length (peerUnacknowledgedTxIds pacPeerState) + numOfRequested = fromIntegral (peerRequestedTxIds pacPeerState) :: Int + numOfAcked0 = StrictSeq.length ackablePrefix + numOfAcked + | numOfRequested > 0 = min numOfAcked0 (max 0 (numOfUnacked - 1)) + | otherwise = numOfAcked0 + + acknowledgedTxIdsSeq = StrictSeq.take numOfAcked ackablePrefix + unacknowledgedTxIds = StrictSeq.drop numOfAcked (peerUnacknowledgedTxIds pacPeerState) + unackedAndRequested = numOfUnacked + numOfRequested + + txIdsToRequest' + | numOfAcked == 0 && numOfUnacked > 0 = 0 + | otherwise = + fromIntegral $ max 0 $ min + (fromIntegral (maxUnacknowledgedTxIds pacPolicy) - unackedAndRequested + numOfAcked) + (fromIntegral (maxNumTxIdsToRequest pacPolicy) - numOfRequested) + +-- | Compute the time delay until the peer should next wake to check for work. 
+nextWakeDelay :: Ord peeraddr + => PeerActionContext peeraddr txid tx + -> Maybe DiffTime +nextWakeDelay PeerActionContext { pacNow, pacPeerAddr, pacSharedState } = + (`diffTime` pacNow) <$> minMaybe nextLeaseWake nextRetainWake + where + nextLeaseWake = + IntMap.foldl' stepLease Nothing (sharedTxTable pacSharedState) + + stepLease acc txEntry@TxEntry { txLease } = + if Map.member pacPeerAddr (txAdvertisers txEntry) + then minMaybe acc (futureLeaseWake txLease) + else acc + + nextRetainWake = retainedNextWake pacNow (sharedRetainedTxs pacSharedState) + + futureLeaseWake TxClaimable = Nothing + futureLeaseWake (TxLeased _ leaseUntil) + | leaseUntil > pacNow = Just leaseUntil + | otherwise = Nothing + + minMaybe :: Ord a => Maybe a -> Maybe a -> Maybe a + minMaybe Nothing y = y + minMaybe x Nothing = x + minMaybe (Just x) (Just y) = Just (min x y) + +-- | Assign a tx lease to a peer and mark it as downloading. +claimTx :: Ord peeraddr + => peeraddr + -> Time + -> TxEntry peeraddr + -> TxEntry peeraddr +claimTx peeraddr leaseUntil txEntry@TxEntry { txAdvertisers, txAttempts } = + txEntry { + txLease = TxLeased peeraddr leaseUntil, + txAdvertisers = Map.adjust setAckWhenBuffered peeraddr txAdvertisers, + txAttempts = Map.insert peeraddr TxDownloading txAttempts } + where + setAckWhenBuffered advertiser = advertiser { txAckState = AckWhenBuffered } -updateRefCounts :: Ord txid - => Map txid Int - -> RefCountDiff txid - -> Map txid Int -updateRefCounts referenceCounts (RefCountDiff diff) = - Map.merge (Map.mapMaybeMissing \_ x -> Just x) - (Map.mapMaybeMissing \_ _ -> Nothing) - (Map.zipWithMaybeMatched \_ x y -> assert (x >= y) - if x > y then Just $! x - y - else Nothing) - referenceCounts - diff - - -tickTimedTxs :: forall peeraddr tx txid. 
- (Ord txid) - => Time - -> SharedTxState peeraddr txid tx - -> SharedTxState peeraddr txid tx -tickTimedTxs now st@SharedTxState{ timedTxs - , referenceCounts - , bufferedTxs } = - let (expiredTxs', timedTxs') = - case Map.splitLookup now timedTxs of - (expired, Just txids, timed) -> - (expired, -- Map.split doesn't include the `now` entry in the map - Map.insert now txids timed) - (expired, Nothing, timed) -> - (expired, timed) - refDiff = Map.foldl' fn Map.empty expiredTxs' - referenceCounts' = updateRefCounts referenceCounts (RefCountDiff refDiff) - liveSet = Map.keysSet referenceCounts' - bufferedTxs' = bufferedTxs `Map.restrictKeys` liveSet in - st { timedTxs = timedTxs' - , referenceCounts = referenceCounts' - , bufferedTxs = bufferedTxs' - } +-- | Determine if a tx is eligible for this peer to request. +-- +-- A tx is selectable if it can be claimed or is already owned by this peer +-- and still being advertised. +txSelectable :: Ord peeraddr + => PeerActionContext peeraddr txid tx + -> TxEntry peeraddr + -> Bool +txSelectable PeerActionContext { pacNow, pacPeerAddr, pacPolicy, pacIdlePeerScores } + txEntry@TxEntry { txAdvertisers, txTieBreakSalt } + | txSubmittingAnywhere txEntry = False + | txPeerHasAttempt = False + | txActiveAttemptCount txEntry >= txInflightMultiplicity pacPolicy = False + | txOwnedByPeer txEntry && Map.member pacPeerAddr txAdvertisers = True + | otherwise = + let peerMayClaim = Map.member pacPeerAddr txAdvertisers && + (case pickClaimOwner of + Just owner -> owner == pacPeerAddr + Nothing -> False) in + case txLease txEntry of + TxClaimable -> peerMayClaim + TxLeased _ leaseExpiry -> (leaseExpiry <= pacNow) && peerMayClaim where - fn :: Map txid Int - -> [txid] - -> Map txid Int - fn m txids = Foldable.foldl' gn m txids - gn :: Map txid Int - -> txid - -> Map txid Int - gn m txid = Map.alter af txid m + -- Select which idle advertiser should claim a tx lease based on + -- peer score. 
+ pickClaimOwner = + case eligiblePeers of + [] -> Nothing + _ -> Just (pickBestPeer eligiblePeers) + where + eligiblePeers = + [ (candidate, score) + | candidate <- Map.keys txAdvertisers + , Just score <- [Map.lookup candidate pacIdlePeerScores] + ] + + pickBestPeer peers = + case sort [ candidate | (candidate, score) <- peers, score == bestScore ] of + [] -> assert False pacPeerAddr + tied -> tied !! (txTieBreakSalt `mod` length tied) + where + bestScore = minimum [ score | (_, score) <- peers ] + + -- txOwnedByPeer :: TxEntry peeraddr -> Bool + txOwnedByPeer TxEntry { txLease = TxLeased owner _ } = owner == pacPeerAddr + txOwnedByPeer TxEntry { txLease = TxClaimable } = False + + txPeerHasAttempt = + case txAttemptOfPeer pacPeerAddr txEntry of + Just TxNoAttempt -> False + Just _ -> True + Nothing -> False + + txActiveAttemptCount :: TxEntry peeraddr -> Int + txActiveAttemptCount TxEntry { txAttempts } = + length + [ () + | attempt <- Map.elems txAttempts + , attempt == TxDownloading || attempt == TxBuffered + ] - af :: Maybe Int - -> Maybe Int - af Nothing = Just 1 - af (Just n) = Just $! succ n +-- | Extract the peer's TxAttemptState for the TX entry, if it exists. +txAttemptOfPeer :: Ord peeraddr => peeraddr -> TxEntry peeraddr -> Maybe TxAttemptState +txAttemptOfPeer peeraddr TxEntry { txAttempts } = Map.lookup peeraddr txAttempts + +-- | Does the peer have the TX entry buffered? +txBufferedByPeer :: Ord peeraddr => peeraddr -> TxEntry peeraddr -> Bool +txBufferedByPeer peeraddr txEntry = + txAttemptOfPeer peeraddr txEntry == Just TxBuffered + +-- | Check whether some other peer is already submitting this tx. -- --- Pure internal API +-- Uses a single fold over 'txAttempts' and short-circuits at the first +-- matching 'TxSubmitting'. 
+txSubmittingByOther :: Eq peeraddr => peeraddr -> TxEntry peeraddr -> Bool +txSubmittingByOther peeraddr TxEntry { txAttempts } = + Map.foldrWithKey + (\owner attempt acc -> (owner /= peeraddr && attempt == TxSubmitting) || acc) + False + txAttempts + +-- | Check whether any peer is currently submitting this tx. -- +-- Like 'txSubmittingByOther', this short-circuits at the first 'TxSubmitting'. +txSubmittingAnywhere :: TxEntry peeraddr -> Bool +txSubmittingAnywhere TxEntry { txAttempts } = + Map.foldr (\attempt acc -> attempt == TxSubmitting || acc) False txAttempts + --- | Insert received `txid`s and return the number of txids to be acknowledged --- and the updated `SharedTxState`. +-- | Compute the current usefulness score for a peer after time-based decay. -- -receivedTxIdsImpl - :: forall peeraddr tx txid. - (Ord txid, Ord peeraddr, HasCallStack) - => (txid -> Bool) -- ^ check if txid is in the mempool, ref - -- 'mempoolHasTx' - -> peeraddr - -> NumTxIdsToReq - -- ^ number of requests to subtract from - -- `requestedTxIdsInflight` - - -> StrictSeq txid - -- ^ sequence of received `txids` - -> Map txid SizeInBytes - -- ^ received `txid`s with sizes - - -> SharedTxState peeraddr txid tx - -> SharedTxState peeraddr txid tx - -receivedTxIdsImpl - mempoolHasTx - peeraddr reqNo txidsSeq txidsMap - st@SharedTxState{ peerTxStates, - bufferedTxs, - referenceCounts } - = - -- using `alterF` so the update of `PeerTxState` is done in one lookup - case Map.alterF (fmap Just . fn . fromJust) - peeraddr - peerTxStates of - ( st', peerTxStates' ) -> - st' { peerTxStates = peerTxStates' } +-- Scores drain at 'scoreRate' (txs/second) from the last update timestamp. +-- Returns zero for peers whose accumulated rejections have fully decayed. 
+currentPeerScore :: TxDecisionPolicy + -> Time + -> PeerScore + -> Double +currentPeerScore TxDecisionPolicy { scoreRate } currentTime + PeerScore { peerScoreValue, peerScoreTs } + | peerScoreValue == 0 = 0 + | currentTime == peerScoreTs = peerScoreValue + | otherwise = max 0 $ peerScoreValue - realToFrac (diffTime currentTime peerScoreTs) * scoreRate +-- | Acknowledge txids from a peer and update shared state. +acknowledgeTxIds :: (Ord peeraddr, Ord txid) + => peeraddr + -> [TxKey] + -> SharedTxState peeraddr txid + -> SharedTxState peeraddr txid +acknowledgeTxIds _ [] st = st +acknowledgeTxIds peeraddr acknowledgedTxIds st = + foldl' acknowledgeOne st' acknowledgedTxIds where - -- update `PeerTxState` and return number of `txid`s to acknowledged and - -- updated `SharedTxState`. - fn :: PeerTxState txid tx - -> ( SharedTxState peeraddr txid tx - , PeerTxState txid tx - ) - fn ps@PeerTxState { availableTxIds, - requestedTxIdsInflight, - unacknowledgedTxIds } = - (st', ps') - where - -- - -- Handle new `txid`s - -- - - -- Divide the new txids in two: those that are already in the mempool - -- and those that are not. We'll request some txs from the latter. - (ignoredTxIds, availableTxIdsMap) = - Map.partitionWithKey - (\txid _ -> mempoolHasTx txid) - txidsMap - - -- Add all `txids` from `availableTxIdsMap` which are not - -- unacknowledged or already buffered. Unacknowledged txids must have - -- already been added to `availableTxIds` map before. - availableTxIds' = - Map.foldlWithKey - (\m txid sizeInBytes -> Map.insert txid sizeInBytes m) - availableTxIds - (Map.filterWithKey - (\txid _ -> txid `notElem` unacknowledgedTxIds - && txid `Map.notMember` bufferedTxs) - availableTxIdsMap) - - -- Add received txids to `unacknowledgedTxIds`. - unacknowledgedTxIds' = unacknowledgedTxIds <> txidsSeq - - -- Add ignored `txs` to buffered ones. - -- Note: we prefer to keep the `tx` if it's already in `bufferedTxs`. 
- bufferedTxs' = bufferedTxs - <> Map.map (const Nothing) ignoredTxIds - - referenceCounts' = - Foldable.foldl' - (flip $ Map.alter (\case - Nothing -> Just $! 1 - Just cnt -> Just $! succ cnt)) - referenceCounts - txidsSeq - - st' = st { bufferedTxs = bufferedTxs', - referenceCounts = referenceCounts' } - ps' = assert (requestedTxIdsInflight >= reqNo) - ps { availableTxIds = availableTxIds', - unacknowledgedTxIds = unacknowledgedTxIds', - requestedTxIdsInflight = requestedTxIdsInflight - reqNo } - --- | We check advertised sizes up in a fuzzy way. The advertised and received --- sizes need to agree up to `const_MAX_TX_SIZE_DISCREPANCY`. + st' = st { sharedGeneration = sharedGeneration st + 1 } + + removeAdvertiser txEntry@TxEntry { txAdvertisers } = + txEntry { txAdvertisers = Map.delete peeraddr txAdvertisers } + + acknowledgeOne acc (TxKey k) = + case IntMap.lookup k (sharedTxTable acc) of + Just txEntry -> + let txEntry' = removeAdvertiser txEntry in + if activeTxLive txEntry' + then acc { sharedTxTable = IntMap.insert k txEntry' (sharedTxTable acc) } + else dropTxKey k acc + Nothing -> acc + +-- | Determine if an unacknowledged txid is ready to be acknowledged. -- -const_MAX_TX_SIZE_DISCREPANCY :: SizeInBytes -const_MAX_TX_SIZE_DISCREPANCY = 32 - -collectTxsImpl - :: forall peeraddr tx txid. - ( Ord peeraddr - , Ord txid - , Show txid - , Typeable txid - ) - => (tx -> SizeInBytes) -- ^ compute tx size - -> peeraddr - -> Map txid SizeInBytes -- ^ requested txids - -> Map txid tx -- ^ received txs - -> SharedTxState peeraddr txid tx - -> Either TxSubmissionProtocolError - (SharedTxState peeraddr txid tx) - -- ^ Return list of `txid` which sizes didn't match or a new state. - -- If one of the `tx` has wrong size, we return an error. The - -- mini-protocol will throw, which will clean the state map from this peer. 
-collectTxsImpl txSize peeraddr requestedTxIdsMap receivedTxs - st@SharedTxState { peerTxStates } = - - -- using `alterF` so the update of `PeerTxState` is done in one lookup - case Map.alterF (fmap Just . fn . fromJust) - peeraddr - peerTxStates of - (Right st', peerTxStates') -> - Right st' { peerTxStates = peerTxStates' } - (Left e, _) -> - Left $ ProtocolErrorTxSizeError e +-- A txid remains ackable after it has been resolved and removed from shared +-- state. The wire protocol only needs an ack count for the peer-local +-- unacknowledged prefix, so a late ack is still safe even after the active +-- entry and retained marker have been pruned. +txIdAckable :: Ord peeraddr + => PeerActionContext peeraddr txid tx + -> TxKey + -> Bool +txIdAckable PeerActionContext { pacPeerAddr, pacPeerState, pacSharedState } (TxKey k) + | retainedMember k (sharedRetainedTxs pacSharedState) = True + | otherwise = + case IntMap.lookup k (sharedTxTable pacSharedState) of + Just txEntry -> + case Map.lookup pacPeerAddr (txAdvertisers txEntry) of + Just TxAdvertiser { txAckState = AckWhenBuffered } -> + -- Ack the txid if we downloaded it and no other + -- peer is in the process of submitting it to the + -- mempool. + IntMap.member k (peerDownloadedTxs pacPeerState) + && not (txBufferedByPeer pacPeerAddr txEntry + && txSubmittingByOther pacPeerAddr txEntry) + Just TxAdvertiser { txAckState = AckWhenResolved } -> + False -- This becomes ackable once the tx is retained or later pruned. + Nothing -> True -- Safe late ack after this peer was pruned from the shared entry. + Nothing -> True -- Safe late ack after the resolved tx was pruned from shared state. +-- | Remove one transaction entry from all shared state maps by key. 
+dropTxKey :: Ord txid + => Int + -> SharedTxState peeraddr txid + -> SharedTxState peeraddr txid +dropTxKey k st@SharedTxState { sharedTxTable, sharedRetainedTxs, sharedTxIdToKey + , sharedKeyToTxId } = + st { + sharedTxTable = IntMap.delete k sharedTxTable, + sharedRetainedTxs = retainedDeleteKeys (IntSet.singleton k) sharedRetainedTxs, + sharedTxIdToKey = deleteTxId sharedTxIdToKey, + sharedKeyToTxId = IntMap.delete k sharedKeyToTxId + } where - -- Update `PeerTxState` and partially update `SharedTxState` (except of - -- `peerTxStates`). - fn :: PeerTxState txid tx - -> ( Either [(txid, SizeInBytes, SizeInBytes)] - (SharedTxState peeraddr txid tx) - , PeerTxState txid tx - ) - fn ps = - case wrongSizedTxs of - [] -> ( Right st' - , ps'' - ) - _ -> ( Left wrongSizedTxs - , ps - ) - where - wrongSizedTxs :: [(txid, SizeInBytes, SizeInBytes)] - wrongSizedTxs = - map (\(a, (b,c)) -> (a,b,c)) - . Map.toList - $ Map.merge - Map.dropMissing - Map.dropMissing - (Map.zipWithMaybeMatched \_ receivedSize advertisedSize -> - if receivedSize `checkTxSize` advertisedSize - then Nothing - else Just (receivedSize, advertisedSize) - ) - (txSize `Map.map` receivedTxs) - requestedTxIdsMap - - checkTxSize :: SizeInBytes - -> SizeInBytes - -> Bool - checkTxSize received advertised - = abs (received - advertised) <= const_MAX_TX_SIZE_DISCREPANCY - - requestedTxIds = Map.keysSet requestedTxIdsMap - notReceived = requestedTxIds Set.\\ Map.keysSet receivedTxs - downloadedTxs' = downloadedTxs ps <> receivedTxs - -- Add not received txs to `unknownTxs` before acknowledging txids. 
- unknownTxs' = unknownTxs ps <> notReceived - - requestedTxsInflight' = - assert (requestedTxIds `Set.isSubsetOf` requestedTxsInflight ps) $ - requestedTxsInflight ps Set.\\ requestedTxIds - - requestedSize = fold $ availableTxIds ps `Map.restrictKeys` requestedTxIds - requestedTxsInflightSize' = - assert (requestedTxsInflightSize ps >= requestedSize) $ - requestedTxsInflightSize ps - requestedSize - - -- subtract requested from in-flight - inflightTxs'' = - Map.merge - (Map.mapMaybeMissing \_ x -> Just x) - (Map.mapMaybeMissing \_ _ -> assert False Nothing) - (Map.zipWithMaybeMatched \_ x y -> assert (x >= y) - let z = x - y in - if z > 0 - then Just z - else Nothing) - (inflightTxs st) - (Map.fromSet (const 1) requestedTxIds) - - st' = st { inflightTxs = inflightTxs'' } - - -- - -- Update PeerTxState - -- - - -- Remove the downloaded `txid`s from the availableTxIds map, this - -- guarantees that we won't attempt to download the `txids` from this peer - -- once we collect the `txid`s. Also restrict keys to `liveSet`. - -- - -- NOTE: we could remove `notReceived` from `availableTxIds`; and - -- possibly avoid using `unknownTxs` field at all. - -- - availableTxIds'' = availableTxIds ps - `Map.withoutKeys` - requestedTxIds - - -- Remove all acknowledged `txid`s from unknown set, but only those - -- which are not present in `unacknowledgedTxIds'` - unknownTxs'' = unknownTxs' - `Set.intersection` - live - where - -- We cannot use `liveSet` as `unknown <> notReceived` might - -- contain `txids` which are in `liveSet` but are not `live`. - live = Set.fromList (toList (unacknowledgedTxIds ps)) + deleteTxId txIdToKey = + case IntMap.lookup k sharedKeyToTxId of + Just txid -> Map.delete txid txIdToKey + Nothing -> txIdToKey + +-- | Remove transaction entries from all shared state maps by key. 
+dropTxKeys :: Ord txid + => IntSet.IntSet + -> SharedTxState peeraddr txid + -> SharedTxState peeraddr txid +dropTxKeys keys st@SharedTxState { sharedTxTable, sharedRetainedTxs, sharedTxIdToKey + , sharedKeyToTxId } + | IntSet.null keys = st + | otherwise = + st { + sharedTxTable = IntMap.withoutKeys sharedTxTable keys, + sharedRetainedTxs = retainedDeleteKeys keys sharedRetainedTxs, + sharedTxIdToKey = IntSet.foldl' deleteTxId sharedTxIdToKey keys, + sharedKeyToTxId = IntMap.withoutKeys sharedKeyToTxId keys + } + where + deleteTxId txIdToKey k = + case IntMap.lookup k sharedKeyToTxId of + Just txid -> Map.delete txid txIdToKey + Nothing -> txIdToKey - ps'' = ps { availableTxIds = availableTxIds'', - unknownTxs = unknownTxs'', - requestedTxsInflightSize = requestedTxsInflightSize', - requestedTxsInflight = requestedTxsInflight', - downloadedTxs = downloadedTxs' } +-- | Remove transaction keys that are no longer active from the shared state. +dropDeadActiveKeys :: Ord txid + => IntSet.IntSet + -> SharedTxState peeraddr txid + -> SharedTxState peeraddr txid +dropDeadActiveKeys keys st@SharedTxState { sharedTxTable } = + let deadKeys = IntSet.filter isDead keys in + dropTxKeys deadKeys st + where + isDead k = + case IntMap.lookup k sharedTxTable of + Just txEntry -> not (activeTxLive txEntry) + Nothing -> False +-- | Is the TX entry alive? -- --- Monadic public API +-- A TX entry is alive if there is a lease, there are advertisers for it or there are +-- download attempts for it. +activeTxLive :: TxEntry peeraddr -> Bool +activeTxLive TxEntry { txLease, txAdvertisers, txAttempts } = + leaseLive txLease + || not (Map.null txAdvertisers) + || not (Map.null txAttempts) + where + leaseLive TxClaimable = False + leaseLive TxLeased {} = True + + +-- | Collect the advertisers of the given active tx keys, excluding one peer. 
+advertisersForKeysExcept
+  :: Ord peeraddr
+  => peeraddr
+  -> IntMap.IntMap (TxEntry peeraddr)
+  -> [TxKey]
+  -> Set.Set peeraddr
+advertisersForKeysExcept currentPeer txTable =
+    foldl' collectAdvertisers Set.empty
+  where
+    collectAdvertisers peers (TxKey k) =
+      case IntMap.lookup k txTable of
+        Just txEntry ->
+          Set.union peers (advertisersForEntryExcept currentPeer txEntry)
+        Nothing ->
+          peers
+
+-- | Get all advertisers for a transaction entry, excluding a specific peer.
+advertisersForEntryExcept :: Ord peeraddr
+                          => peeraddr
+                          -> TxEntry peeraddr
+                          -> Set.Set peeraddr
+advertisersForEntryExcept currentPeer TxEntry { txAdvertisers } =
+  Set.delete currentPeer (Map.keysSet txAdvertisers)
+
+
+-- | Handle a batch of tx bodies received from one peer.
 --
+-- Received bodies are buffered locally in the peer state. Bodies that are
+-- already retained or already in the mempool are counted as late and dropped.
+-- Any requested tx omitted from the reply releases this peer's ownership.
+-- Late TXs contribute to the returned penalty count.
+handleReceivedTxs :: (Ord peeraddr, Ord txid) + => (txid -> Bool) + -> Time + -> TxDecisionPolicy + -> peeraddr + -> [(txid, tx)] + -> PeerTxLocalState tx + -> SharedTxState peeraddr txid + -> (Int, Int, PeerTxLocalState tx, SharedTxState peeraddr txid) +handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState = + (omittedCount, lateCount, peerState', sharedState') + where + txidToKey = sharedTxIdToKey sharedState -type SharedTxStateVar m peeraddr txid tx = StrictTVar m (SharedTxState peeraddr txid tx) - -newSharedTxStateVar :: MonadSTM m - => StdGen - -> m (SharedTxStateVar m peeraddr txid tx) -newSharedTxStateVar rng = newTVarIO SharedTxState { - peerTxStates = Map.empty, - inflightTxs = Map.empty, - bufferedTxs = Map.empty, - referenceCounts = Map.empty, - timedTxs = Map.empty, - inSubmissionToMempoolTxs = Map.empty, - peerRng = rng - } + requestedKeys = requestedTxBatchSet requestedBatch + retainUntil = addTime (bufferedTxsMinLifetime policy) now + + -- Dequeue the next requested tx batch to process. + (requestedBatch, remainingRequestedBatches) = + case peerRequestedTxBatches peerState of + StrictSeq.Empty -> + error "TxSubmission.Inbound.V2.handleReceivedTxs null requestedBatches" + batch StrictSeq.:<| batches -> + (batch, batches) + + -- Process each received tx, collecting late counts, pending requests, + -- updated tables, and peers to wake up. + ( lateCount + , pendingRequestedKeys + , txTableHandled + , retainedHandled + , receivedWakePeers + , peerDownloadedTxs' + ) = + foldl' + handleOne + ( 0 + , requestedKeys + , sharedTxTable sharedState + , sharedRetainedTxs sharedState + , Set.empty + , peerDownloadedTxs peerState + ) + txs + + -- Process omitted (not received) txs: count a penalty for every omitted + -- request, release ownership for keys that are still live, and collect + -- peers to wake up. 
+ (omittedCount, txTableReleased, omittedWakePeers) = + IntSet.foldl' handleOmitted (0, txTableHandled, Set.empty) pendingRequestedKeys + + -- Build the final shared state with updated tables and cleaned-up dead entries. + sharedState'' = + dropDeadActiveKeys pendingRequestedKeys sharedState { + sharedTxTable = txTableReleased, + sharedRetainedTxs = retainedHandled, + sharedGeneration = sharedGeneration sharedState + 1 + } + + -- Update peer state: remove processed keys, update batch tracking, and record + -- downloaded txs. + peerState' = peerState { + peerAvailableTxIds = + IntSet.foldl' (flip IntMap.delete) (peerAvailableTxIds peerState) requestedKeys + , peerRequestedTxs = peerRequestedTxs peerState `IntSet.difference` requestedKeys + , peerRequestedTxBatches = remainingRequestedBatches + , peerRequestedTxsSize = peerRequestedTxsSize peerState - requestedTxBatchSize requestedBatch + , peerDownloadedTxs = peerDownloadedTxs' + } + + -- Flag peers that may now have work available after processing txs. + sharedState' = + bumpIdlePeerGenerations + (Set.union receivedWakePeers omittedWakePeers) + sharedState'' + + keyWasLive k = + IntMap.member k (sharedTxTable sharedState) + || retainedMember k (sharedRetainedTxs sharedState) + + + -- Fold function over received txs: classify as late, already in mempool, or buffer for + -- download. 
+ handleOne + ( lateCountAcc + , pendingKeysAcc + , txTableAcc + , retainedAcc + , wakePeersAcc + , downloadedAcc + ) + (txid, tx) = + case Map.lookup txid txidToKey of + Nothing -> + ( lateCountAcc + 1 + , pendingKeysAcc + , txTableAcc + , retainedAcc + , wakePeersAcc + , downloadedAcc + ) + Just (TxKey k) + | retainedMember k retainedAcc -> + ( lateCountAcc + 1 + , IntSet.delete k pendingKeysAcc + , IntMap.delete k txTableAcc + , retainedAcc + , wakePeersAcc + , downloadedAcc + ) + | mempoolHasTx txid -> + let wakePeers = + case IntMap.lookup k txTableAcc of + Just txEntry -> + Set.union (advertisersForEntryExcept peeraddr txEntry) + wakePeersAcc + Nothing -> + wakePeersAcc in + ( lateCountAcc + 1 + , IntSet.delete k pendingKeysAcc + , IntMap.delete k txTableAcc + , retainedInsertMax k retainUntil retainedAcc + , wakePeers + , downloadedAcc + ) + | otherwise -> + case IntMap.lookup k txTableAcc of + Just txEntry + | peerHasAttempt txEntry -> + ( lateCountAcc + , IntSet.delete k pendingKeysAcc + , IntMap.insert k (markBuffered txEntry) txTableAcc + , retainedAcc + , wakePeersAcc + , IntMap.insert k tx downloadedAcc + ) + _ -> + ( lateCountAcc + 1 + , IntSet.delete k pendingKeysAcc + , txTableAcc + , retainedAcc + , wakePeersAcc + , downloadedAcc + ) + + -- Handle omitted (not received) txs: release ownership, count penalties, + -- and wake up other advertisers if the tx is still active. 
+ handleOmitted (omittedCountAcc, txTableAcc, wakePeersAcc) k + | keyWasLive k = + let txTableAcc' = releaseOne txTableAcc k + wakePeersAcc' = + case IntMap.lookup k txTableAcc' of + Just txEntry + | activeTxLive txEntry -> + Set.union (advertisersForEntryExcept peeraddr txEntry) wakePeersAcc + _ -> wakePeersAcc in + (omittedCountAcc + 1, txTableAcc', wakePeersAcc') + | otherwise = + (omittedCountAcc + 1, txTableAcc, wakePeersAcc) + + peerHasAttempt TxEntry { txAttempts } = + Map.member peeraddr txAttempts + + markBuffered txEntry@TxEntry { txAttempts } = + txEntry { txAttempts = Map.insert peeraddr TxBuffered txAttempts } + + releaseOne txTable k = + IntMap.adjust releaseLease k txTable + + releaseLease txEntry@TxEntry { txLease, txAdvertisers, txAttempts } = + txEntry { + txLease = case txLease of + TxLeased owner _ | owner == peeraddr -> TxClaimable + _ -> txLease, + txAdvertisers = Map.delete peeraddr txAdvertisers, + txAttempts = Map.delete peeraddr txAttempts + } --- | Acknowledge `txid`s, return the number of `txids` to be acknowledged to the --- remote side. +-- | Handle the result of submitting buffered txs to the mempool. -- -receivedTxIds - :: forall m peeraddr idx tx txid. - (MonadSTM m, Ord txid, Ord peeraddr) - => Tracer m (TraceTxLogic peeraddr txid tx) - -> SharedTxStateVar m peeraddr txid tx - -> STM m (MempoolSnapshot txid tx idx) - -> peeraddr - -> NumTxIdsToReq - -- ^ number of requests to subtract from - -- `requestedTxIdsInflight` - -> StrictSeq txid - -- ^ sequence of received `txids` - -> Map txid SizeInBytes - -- ^ received `txid`s with sizes - -> m () -receivedTxIds tracer sharedVar getMempoolSnapshot peeraddr reqNo txidsSeq txidsMap = do - st <- atomically $ do - MempoolSnapshot{mempoolHasTx} <- getMempoolSnapshot - stateTVar sharedVar ((\a -> (a,a)) . receivedTxIdsImpl mempoolHasTx peeraddr reqNo txidsSeq txidsMap) - traceWith tracer (TraceSharedTxState "receivedTxIds" st) - - --- | Include received `tx`s in `SharedTxState`. 
Return number of `txids` --- to be acknowledged and list of `tx` to be added to the mempool. +-- Accepted txs leave the active table and move into the retained set so later +-- txid advertisements can be acknowledged without re-requesting the body. +-- Txs rejected by the mempool release this peer's attempt state and advertiser +-- slot so another advertiser may try later. +handleSubmittedTxs :: (Ord peeraddr, Ord txid) + => Time + -> TxDecisionPolicy + -> peeraddr + -> [TxKey] + -> [TxKey] + -> PeerTxLocalState tx + -> SharedTxState peeraddr txid + -> (PeerTxLocalState tx, SharedTxState peeraddr txid) +handleSubmittedTxs now policy peeraddr acceptedTxs rejectedTxs peerState sharedState = + (peerState', sharedState') + where + acceptedKeys = IntSet.fromList (unTxKey <$> acceptedTxs) + rejectedKeys = IntSet.fromList (unTxKey <$> rejectedTxs) + submittedKeys = acceptedKeys `IntSet.union` rejectedKeys + + peerState' = peerState { + peerDownloadedTxs = + IntSet.foldl' (flip IntMap.delete) (peerDownloadedTxs peerState) submittedKeys + } + + (acceptedAdvertisers, activeTableAfterAccepted, retainedTxs') = acceptSubmittedTxs + + rejectedActive = IntSet.foldl' updateRejected activeTableAfterAccepted rejectedKeys + + sharedState' = + bumpIdlePeerGenerations + (Set.union acceptedAdvertisers (advertisersForKeysExcept peeraddr txTable' (fmap TxKey (IntSet.toList rejectedKeys)))) + sharedState'' + + sharedState'' = + dropDeadActiveKeys rejectedKeys sharedState { + sharedTxTable = rejectedActive, + sharedRetainedTxs = retainedTxs', + sharedGeneration = sharedGeneration sharedState + 1 + } + + txTable' = sharedTxTable sharedState'' + + retainedUntil = addTime (bufferedTxsMinLifetime policy) now + + acceptSubmittedTxs = + ( advertisersForKeysExcept peeraddr (sharedTxTable sharedState) acceptedTxs + , IntMap.withoutKeys (sharedTxTable sharedState) acceptedKeys + , IntSet.foldl' + (\retained k -> retainedInsertMax k retainedUntil retained) + (sharedRetainedTxs sharedState) + 
acceptedKeys + ) + + updateRejected txTable k = + IntMap.adjust markRejected k txTable + + markRejected txEntry@TxEntry { txLease, txAdvertisers, txAttempts } = + txEntry { + txLease = case txLease of + TxLeased owner _ | owner == peeraddr -> TxClaimable + _ -> txLease, + txAdvertisers = Map.delete peeraddr txAdvertisers, + txAttempts = Map.delete peeraddr txAttempts + } + + +-- | Mark buffered txs as entering mempool submission. +markSubmittingTxs :: Ord peeraddr + => peeraddr + -> [TxKey] + -> SharedTxState peeraddr txid + -> SharedTxState peeraddr txid +markSubmittingTxs _ [] st = st +markSubmittingTxs peeraddr txKeys st = + st { + sharedTxTable = foldl' markOne (sharedTxTable st) txKeys, + sharedGeneration = sharedGeneration st + 1 + } + where + markOne txTable (TxKey k) = IntMap.adjust markSubmitting k txTable + + markSubmitting txEntry@TxEntry { txAttempts } = + txEntry { + txAttempts = Map.adjust toSubmitting peeraddr txAttempts + } + + toSubmitting TxBuffered = TxSubmitting + toSubmitting attempt = attempt + + +-- | Handle a batch of txids received from one peer. -- -collectTxs - :: forall m peeraddr tx txid. 
- (MonadSTM m, Ord txid, Ord peeraddr, - Show txid, Typeable txid) - => Tracer m (TraceTxLogic peeraddr txid tx) - -> (tx -> SizeInBytes) - -> SharedTxStateVar m peeraddr txid tx - -> peeraddr - -> Map txid SizeInBytes -- ^ set of requested txids with their announced size - -> Map txid tx -- ^ received txs - -> m (Maybe TxSubmissionProtocolError) - -- ^ number of txids to be acknowledged and txs to be added to the - -- mempool -collectTxs tracer txSize sharedVar peeraddr txidsRequested txsMap = do - r <- atomically $ do - st <- readTVar sharedVar - case collectTxsImpl txSize peeraddr txidsRequested txsMap st of - r@(Right st') -> writeTVar sharedVar st' - $> r - r@Left {} -> pure r - case r of - Right st -> traceWith tracer (TraceSharedTxState "collectTxs" st) - $> Nothing - Left e -> return (Just e) +-- Newly seen txids are interned, appended to the peer's unacknowledged queue, +-- and entered into the shared tx table. The first advertiser gets the initial +-- lease and may acknowledge once the body is buffered locally. Later +-- advertisers are tracked as backups and may only acknowledge once the tx is +-- resolved. +handleReceivedTxIds :: forall peeraddr txid tx. (Ord peeraddr, Ord txid) + => (txid -> Bool) + -> Time + -> TxDecisionPolicy + -> peeraddr + -> NumTxIdsToReq + -> [(txid, SizeInBytes)] + -> PeerTxLocalState tx + -> SharedTxState peeraddr txid + -> (PeerTxLocalState tx, SharedTxState peeraddr txid) +handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSizes + peerState sharedState = + (peerState'', sharedState'') + where + + -- Fold over received txids: build unacknowledged list, update tables, + -- and track peers to wake based on tx state (retained/mempool/new). 
+ ( peerUnacknowledgedTxIds' + , peerAvailableTxIds' + , sharedTxIdToKey' + , sharedKeyToTxId' + , sharedNextTxKey' + , sharedTxTable' + , sharedRetainedTxs' + , peersToWake + ) = + foldl' + step + ( peerUnacknowledgedTxIds peerState + , peerAvailableTxIds peerState + , sharedTxIdToKey sharedState + , sharedKeyToTxId sharedState + , sharedNextTxKey sharedState + , sharedTxTable sharedState + , sharedRetainedTxs sharedState + , Set.empty + ) + txidsAndSizes + + peerState'' = peerState { + peerUnacknowledgedTxIds = peerUnacknowledgedTxIds', + peerRequestedTxIds = fromIntegral $ + max 0 ( fromIntegral (peerRequestedTxIds peerState) - + fromIntegral requestedTxIds :: Int ), + peerAvailableTxIds = peerAvailableTxIds' + } + + sharedState' = sharedState { + sharedTxIdToKey = sharedTxIdToKey', + sharedKeyToTxId = sharedKeyToTxId', + sharedNextTxKey = sharedNextTxKey', + sharedTxTable = sharedTxTable', + sharedRetainedTxs = sharedRetainedTxs' + } + + sharedState'' = + bumpIdlePeerGenerations peersToWake sharedState' { + sharedTxTable = sharedTxTable', + sharedRetainedTxs = sharedRetainedTxs', + sharedGeneration = sharedGeneration sharedState' + 1 + } + + retainUntil = addTime (bufferedTxsMinLifetime policy) now + + -- Process each received txid: classify as retained, in mempool, or new entry. 
+ step + :: ( StrictSeq.StrictSeq TxKey + , IntMap.IntMap SizeInBytes + , Map.Map txid TxKey + , IntMap.IntMap txid + , Int + , IntMap.IntMap (TxEntry peeraddr) + , RetainedTxs + , Set.Set peeraddr + ) + -> (txid, SizeInBytes) + -> ( StrictSeq.StrictSeq TxKey + , IntMap.IntMap SizeInBytes + , Map.Map txid TxKey + , IntMap.IntMap txid + , Int + , IntMap.IntMap (TxEntry peeraddr) + , RetainedTxs + , Set.Set peeraddr + ) + step + ( unacknowledgedAcc + , availableAcc + , txIdToKeyAcc + , keyToTxIdAcc + , nextTxKeyAcc + , txTableAcc + , retainedAcc + , peersAcc + ) + (txid, txSize) + | retainedMember k retainedAcc = + ( unacknowledgedAcc StrictSeq.|> txKey + , IntMap.delete k availableAcc + , txIdToKeyAcc' + , keyToTxIdAcc' + , nextTxKeyAcc' + , txTableAcc + , retainedAcc + , peersAcc + ) + | mempoolHasTx txid = + let wakePeers = case IntMap.lookup k txTableAcc of + Just txEntry -> Set.union (advertisersForEntryExcept peeraddr txEntry) peersAcc + Nothing -> peersAcc + in ( unacknowledgedAcc StrictSeq.|> txKey + , IntMap.delete k availableAcc + , txIdToKeyAcc' + , keyToTxIdAcc' + , nextTxKeyAcc' + , IntMap.delete k txTableAcc + , retainedInsertMax k retainUntil retainedAcc + , wakePeers + ) + | otherwise = + case IntMap.lookup k txTableAcc of + Nothing -> + let txEntry = TxEntry { + txLease = TxLeased peeraddr (addTime (interTxSpace policy) now), + txAdvertisers = Map.singleton peeraddr (TxAdvertiser AckWhenBuffered txSize), + txTieBreakSalt = k, + txAttempts = Map.empty + } + in ( unacknowledgedAcc StrictSeq.|> txKey + , IntMap.insert k txSize availableAcc + , txIdToKeyAcc' + , keyToTxIdAcc' + , nextTxKeyAcc' + , IntMap.insert k txEntry txTableAcc + , retainedAcc + , peersAcc + ) + Just txEntry -> + let txEntry' = addAdvertiser txSize txEntry + in ( unacknowledgedAcc StrictSeq.|> txKey + , IntMap.insert k txSize availableAcc + , txIdToKeyAcc' + , keyToTxIdAcc' + , nextTxKeyAcc' + , IntMap.insert k txEntry' txTableAcc + , retainedAcc + , peersAcc + ) + where + 
(txKey@(TxKey k), txIdToKeyAcc', keyToTxIdAcc', nextTxKeyAcc') = + case Map.lookup txid txIdToKeyAcc of + Just existingKey -> + (existingKey, txIdToKeyAcc, keyToTxIdAcc, nextTxKeyAcc) + Nothing -> + let newKey = TxKey nextTxKeyAcc + in ( newKey + , Map.insert txid newKey txIdToKeyAcc + , IntMap.insert nextTxKeyAcc txid keyToTxIdAcc + , nextTxKeyAcc + 1 + ) + + addAdvertiser txSize' txEntry@TxEntry { txAdvertisers } + | Map.member peeraddr txAdvertisers = + txEntry + | otherwise = + txEntry { + txAdvertisers = Map.insert peeraddr (TxAdvertiser AckWhenResolved txSize') txAdvertisers + } diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index 0fa246e4a88..61e27eb1240 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -1,4 +1,3 @@ -{-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE ExistentialQuantification #-} @@ -11,17 +10,28 @@ {-# LANGUAGE TypeApplications #-} module Ouroboros.Network.TxSubmission.Inbound.V2.Types - ( -- * PeerTxState - PeerTxState (..) - -- * SharedTxState - , SharedTxState (..) - -- * Decisions - , TxsToMempool (..) - , TxDecision (..) - , emptyTxDecision + ( -- * Shared state + SharedTxState (..) + -- * RetainedTxs with helper functions + , RetainedTxs + , retainedEmpty + , retainedSingleton + , retainedFromList + , retainedToList + , retainedSize + , retainedLookup + , retainedMember + , retainedInsertMax + , retainedDeleteKeys + , retainedKeysSet + , retainedRestrictKeys + , retainedNextWake + , retainedExpiredKeys + -- * Traces , TraceTxLogic (..) , TxSubmissionInitDelay (..) , defaultTxSubmissionInitDelay + , const_MAX_TX_SIZE_DISCREPANCY -- * Types shared with V1 -- ** Various , ProcessedTxCount (..) 
@@ -31,307 +41,489 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.Types -- ** Traces , TraceTxSubmissionInbound (..) , TxSubmissionCounters (..) - , mkTxSubmissionCounters -- ** Protocol Error , TxSubmissionProtocolError (..) + , TxOwnerAckState (..) + , TxAdvertiser (..) + , RequestedTxBatch (..) + , TxAttemptState (..) + , TxLease (..) + , TxEntry (..) + , PeerAction (..) + , PeerPhase (..) + , PeerScore (..) + , PeerTxLocalState (..) + , SharedPeerState (..) + , peerGenerationOf + , bumpIdlePeerGenerations + -- TxKey with helper functions + , TxKey (..) + , lookupTxKey + , resolveTxKey + , internTxId + , internTxIds + , emptyPeerScore + , emptyPeerTxLocalState + , emptySharedTxState ) where -import Control.DeepSeq import Control.Exception (Exception (..)) import Control.Monad.Class.MonadTime.SI import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map -import Data.Monoid (Sum (..)) import Data.Sequence.Strict (StrictSeq) -import Data.Set (Set) import Data.Set qualified as Set import Data.Typeable (Typeable, eqT, (:~:) (Refl)) import GHC.Generics (Generic) -import System.Random (StdGen) - -import NoThunks.Class (NoThunks (..)) import Ouroboros.Network.Protocol.TxSubmission2.Type --- | Flag to enable/disable the usage of the new tx-submission logic. --- -data TxSubmissionLogicVersion = - -- | the legacy `Ouroboros.Network.TxSubmission.Inbound.V1` - TxSubmissionLogicV1 - -- | the new `Ouroboros.Network.TxSubmission.Inbound.V2` - | TxSubmissionLogicV2 - deriving (Eq, Show, Enum, Bounded) - +import Data.Foldable (foldl') +import Data.IntMap.Strict (IntMap) +import Data.IntMap.Strict qualified as IntMap +import Data.IntPSQ (IntPSQ) +import Data.IntPSQ qualified as IntPSQ +import Data.IntSet (IntSet) +import Data.IntSet qualified as IntSet +import Data.List (sortOn) +import Data.Sequence.Strict qualified as StrictSeq +import Data.Word (Word64) + + +-- | We check advertised sizes in a fuzzy way. 
The advertised and received +-- sizes need to agree up to this discrepancy. +const_MAX_TX_SIZE_DISCREPANCY :: SizeInBytes +const_MAX_TX_SIZE_DISCREPANCY = 32 + +-- | Compact internal transaction key used by V2 state. +newtype TxKey = TxKey { unTxKey :: Int } + deriving stock (Eq, Ord, Show, Generic) + deriving newtype (Enum) + +-- | State which determines when a peer that advertised a txid may +-- acknowledge it. -- --- PeerTxState, SharedTxState +-- The owner peer may acknowledge once the body is buffered locally, +-- while other peers must wait until the tx is either accepted by the +-- mempool or fully exhausted. +data TxOwnerAckState + = AckWhenBuffered + | AckWhenResolved + deriving stock (Eq, Ord, Show, Generic) + +-- | Per-peer advertisement state for a tx. -- - -data PeerTxState txid tx = PeerTxState { - -- | Those transactions (by their identifier) that the client has told - -- us about, and which we have not yet acknowledged. This is kept in - -- the order in which the client gave them to us. This is the same order - -- in which we submit them to the mempool (or for this example, the final - -- result order). It is also the order we acknowledge in. - -- - unacknowledgedTxIds :: !(StrictSeq txid), - - -- | Set of known transaction ids which can be requested from this peer. - -- - availableTxIds :: !(Map txid SizeInBytes), - - -- | The number of transaction identifiers that we have requested but - -- which have not yet been replied to. We need to track this it keep - -- our requests within the limit on the number of unacknowledged txids. - -- - requestedTxIdsInflight :: !NumTxIdsToReq, - - -- | The size in bytes of transactions that we have requested but which - -- have not yet been replied to. - -- - requestedTxsInflightSize :: !SizeInBytes, - - -- | The set of requested `txid`s. - -- - requestedTxsInflight :: !(Set txid), - - -- | A subset of `unacknowledgedTxIds` which were unknown to the peer - -- (i.e. requested but not received). 
We need to track these `txid`s - -- since they need to be acknowledged. - -- - -- We track these `txid` per peer, rather than in `bufferedTxs` map, - -- since that could potentially lead to corrupting the node, not being - -- able to download a `tx` which is needed & available from other nodes. - -- - unknownTxs :: !(Set txid), - - -- | Score is a metric that tracks how usefull a peer has been. - -- The larger the value the less usefull peer. It slowly decays towards - -- zero. - score :: !Double, - - -- | Timestamp for the last time `score` was drained. - scoreTs :: !Time, - - -- | A set of TXs downloaded from the peer. They are not yet - -- acknowledged and haven't been sent to the mempool yet. - -- - -- Life cycle of entries: - -- * added when a tx is downloaded (see `collectTxsImpl`) - -- * follows `unacknowledgedTxIds` (see `acknowledgeTxIds`) - -- - downloadedTxs :: !(Map txid tx), - - -- | A set of TXs on their way to the mempool. - -- Tracked here so that we can cleanup `inSubmissionToMempoolTxs` if the - -- peer dies. - -- - -- Life cycle of entries: - -- * added by `acknowledgeTxIds` (where decide which txs can be - -- submitted to the mempool) - -- * removed by `withMempoolSem` - -- - toMempoolTxs :: !(Map txid tx) - - } - deriving (Eq, Show, Generic, NFData) - -instance ( NoThunks txid - , NoThunks tx - ) => NoThunks (PeerTxState txid tx) +-- The advertised size must be tracked per peer. +data TxAdvertiser = TxAdvertiser { + txAckState :: !TxOwnerAckState, + txAdvertisedSize :: !SizeInBytes + } + deriving stock (Eq, Show, Generic) --- | Shared state of all `TxSubmission` clients. --- --- New `txid` enters `unacknowledgedTxIds` it is also added to `availableTxIds` --- and `referenceCounts` (see `acknowledgeTxIdsImpl`). +-- | Per-peer attempt state for one tx body. -- --- When a `txid` id is selected to be downloaded, it's added to --- `requestedTxsInflightSize` (see --- `Ouroboros.Network.TxSubmission.Inbound.Decision.pickTxsToDownload`). 
+-- V2 keeps the actual tx body in peer-local state, but shared state still +-- needs to know whether each advertiser is currently downloading, has a +-- buffered body ready, or is submitting it to the mempool. +data TxAttemptState + = -- | The peer has advertised this transaction but has not yet started + -- downloading the tx body. This is the initial state when a peer first + -- advertises a txid. + TxNoAttempt + + | -- | The peer is currently downloading the tx body from another peer. + -- The tx body is being fetched and has not yet been received. + TxDownloading + + | -- | The peer has finished downloading the tx body and it is buffered + -- locally, waiting to be submitted to the mempool. + TxBuffered + + | -- | The peer is submitting the tx body to the mempool. This + -- is the final state before the transaction leaves the tracking + -- system (either accepted into the mempool or rejected). + TxSubmitting + deriving stock (Eq, Ord, Show, Generic) + +-- | The current download lease for a tx body. -- --- When the request arrives, the `txid` is removed from `inflightTxs`. It --- might be added to `unknownTxs` if the server didn't have that `txid`, or --- it's added to `bufferedTxs` (see `collectTxsImpl`). --- --- Whenever we choose `txid` to acknowledge (either in `acknowledtxsIdsImpl`, --- `collectTxsImpl` or --- `Ouroboros.Network.TxSubmission.Inbound.Decision.pickTxsToDownload`, we also --- recalculate `referenceCounts` and only keep live `txid`s in other maps (e.g. --- `availableTxIds`, `bufferedTxs`, `unknownTxs`). --- -data SharedTxState peeraddr txid tx = SharedTxState { - - -- | Map of peer states. - -- - -- /Invariant:/ for peeraddr's which are registered using `withPeer`, - -- there's always an entry in this map even if the set of `txid`s is - -- empty. 
- -- - peerTxStates :: !(Map peeraddr (PeerTxState txid tx)), - - -- | Set of transactions which are in-flight (have already been - -- requested) together with multiplicities (from how many peers it is - -- currently in-flight) - -- - -- This set can intersect with `availableTxIds`. - -- - inflightTxs :: !(Map txid Int), - - -- | Map of `tx` which: - -- - -- * were downloaded and added to the mempool, - -- * are already in the mempool (`Nothing` is inserted in that case), - -- - -- We only keep live `txid`, e.g. ones which `txid` is unacknowledged by - -- at least one peer or has a `timedTxs` entry. - -- - -- /Note:/ `txid`s which `tx` were unknown by a peer are tracked - -- separately in `unknownTxs`. - -- - -- /Note:/ previous implementation also needed to explicitly track - -- `txid`s which were already acknowledged, but are still unacknowledged. - -- In this implementation, this is done using reference counting. - -- - -- This map is useful to acknowledge `txid`s, it's basically taking the - -- longest prefix which contains entries in `bufferedTxs` or `unknownTxs`. - -- - bufferedTxs :: !(Map txid (Maybe tx)), - - -- | We track reference counts of all unacknowledged and timedTxs txids. - -- Once the count reaches 0, a tx is removed from `bufferedTxs`. - -- - -- The `bufferedTx` map contains a subset of `txid` which - -- `referenceCounts` contains. - -- - -- /Invariants:/ - -- - -- * the txid count is equal to multiplicity of txid in all - -- `unacknowledgedTxIds` sequences; - -- * @Map.keysSet bufferedTxs `Set.isSubsetOf` Map.keysSet referenceCounts@; - -- * all counts are positive integers. - -- - referenceCounts :: !(Map txid Int), - - -- | A set of timeouts for txids that have been added to bufferedTxs after being - -- inserted into the mempool. - -- - -- We need these short timeouts to avoid re-downloading a `tx`. We could - -- acknowledge this `txid` to all peers, when a peer from another - -- continent presents us it again. 
- -- - -- Every txid entry has a reference count in `referenceCounts`. - -- - timedTxs :: !(Map Time [txid]), - - -- | A set of txids that have been downloaded by a peer and are on their - -- way to the mempool. We won't issue further fetch-requests for TXs in - -- this state. We track these txs to not re-download them from another - -- peer. - -- - -- * We subtract from the counter when a given tx is added or rejected by - -- the mempool or do that for all txs in `toMempoolTxs` when a peer is - -- unregistered. - -- * We add to the counter when a given tx is selected to be added to the - -- mempool in `pickTxsToDownload`. - -- - inSubmissionToMempoolTxs :: !(Map txid Int), - - -- | Rng used to randomly order peers - peerRng :: !StdGen - } - deriving (Eq, Show, Generic, NFData) - -instance ( NoThunks peeraddr - , NoThunks tx - , NoThunks txid - , NoThunks StdGen - ) => NoThunks (SharedTxState peeraddr txid tx) +-- A tx is either currently leased to a peer until a deadline or it is +-- unowned and can be claimed by an eligible advertiser. +data TxLease peeraddr = TxLeased !peeraddr !Time + | TxClaimable + deriving stock (Eq, Show, Generic) +-- | Shared per-tx state. +-- +-- V2 keeps all cross-peer coordination at tx granularity. The first +-- peer that advertises a new txid becomes its initial owner. If the +-- lease expires without a successful outcome, the tx becomes claimable +-- and another eligible advertiser may atomically claim it. +data TxEntry peeraddr = TxEntry { + -- | Current owner lease for downloading the tx body. + txLease :: !(TxLease peeraddr), + + -- | Peers that have advertised this tx. + txAdvertisers :: !(Map peeraddr TxAdvertiser), + + -- | Stable salt used to break ties between equally scored advertisers. + txTieBreakSalt :: !Int, + + -- | Current per-peer attempt state for this tx body. + txAttempts :: !(Map peeraddr TxAttemptState) + } + deriving stock (Eq, Show, Generic) +-- | The next peer-local action chosen by the V2 worker thread. 
-- --- Decisions +-- V2 drives progress from the peer thread itself. Shared state only decides +-- whether a peer is allowed to perform an action; the peer thread then carries +-- it out directly. +data PeerAction + = -- | No immediate work is available. The peer should wait for either a + -- shared-state generation change or the optional timeout. + PeerDoNothing !Word64 !(Maybe DiffTime) + | -- | Send a txid protocol message acknowledging the first argument and + -- requesting the second argument. + PeerRequestTxIds !NumTxIdsToAck !NumTxIdsToReq + | -- | Request tx bodies for the given internally keyed txs from this peer. + PeerRequestTxs ![TxKey] + | -- | Submit the buffered txs identified by the given keys to the local + -- mempool. + PeerSubmitTxs ![TxKey] + deriving stock (Eq, Show, Generic) + + +-- | A batch of transaction body requests sent to a peer. -- +-- Tracks the set of requested txids and the total expected +-- size in bytes for the batch. Used to manage inflight requests and match +-- responses to the original request. +data RequestedTxBatch = RequestedTxBatch { + -- | The set of transaction keys requested in this batch. + requestedTxBatchSet :: !IntSet + + -- | Total expected size in bytes for all tx bodies in this batch. + , requestedTxBatchSize :: !SizeInBytes + } + deriving stock (Eq, Show, Generic) -newtype TxsToMempool txid tx = TxsToMempool { listOfTxsToMempool :: [(txid, tx)] } - deriving newtype (Eq, Show, Semigroup, Monoid, NFData) - - --- | Decision made by the decision logic. Each peer will receive a 'Decision'. +-- | Coarse phase of a peer worker thread. -- --- /note:/ it is rather non-standard to represent a choice between requesting --- `txid`s and `tx`'s as a product rather than a sum type. The client will --- need to download `tx`s first and then send a request for more txids (and --- acknowledge some `txid`s). 
Due to pipelining each client will request --- decision from the decision logic quite often (every two pipelined requests), --- but with this design a decision once taken will make the peer non-active --- (e.g. it won't be returned by `filterActivePeers`) for longer, and thus the --- expensive `makeDecision` computation will not need to take that peer into --- account. +-- The phase is used when deciding whether a peer is currently eligible +-- to claim an expired tx lease. +data PeerPhase + = -- | The peer worker is idle and may claim work for advertised txs. + PeerIdle + | -- | The peer worker is waiting for a txid reply from the remote peer. + PeerWaitingTxIds + | -- | The peer worker is waiting for a tx-body reply from the remote peer. + PeerWaitingTxs + | -- | The peer worker is submitting buffered txs to the local mempool. + PeerSubmittingToMempool + deriving stock (Eq, Ord, Show, Generic) + +-- | Peer usefulness score. -- -data TxDecision txid tx = TxDecision { - txdTxIdsToAcknowledge :: !NumTxIdsToAck, - -- ^ txid's to acknowledge +-- Lower is better. +data PeerScore = PeerScore { + peerScoreValue :: !Double, + peerScoreTs :: !Time + } + deriving stock (Eq, Show, Generic) - txdTxIdsToRequest :: !NumTxIdsToReq, - -- ^ number of txid's to request +-- | Low-cost monotonic counters for the V2 peer protocol and coordination path. +-- +-- These counters are updated incrementally at the points where V2 performs +-- protocol sends, receives replies, or cheaply classifies received bodies. +-- They are kept separate from 'SharedTxState' so that emission can read a +-- small dedicated counter cell without scanning protocol state. 
+data TxSubmissionCounters = TxSubmissionCounters { + txIdMessagesSent :: !Word64, + txIdsRequested :: !Word64, + txIdRepliesReceived :: !Word64, + txIdsReceived :: !Word64, + txMessagesSent :: !Word64, + txsRequested :: !Word64, + txRepliesReceived :: !Word64, + txsReceived :: !Word64, + txsOmitted :: !Word64, + lateBodies :: !Word64 + } + deriving stock (Eq, Show, Generic) + +instance Semigroup TxSubmissionCounters where + a <> b = TxSubmissionCounters { + txIdMessagesSent = txIdMessagesSent a + txIdMessagesSent b, + txIdsRequested = txIdsRequested a + txIdsRequested b, + txIdRepliesReceived = txIdRepliesReceived a + txIdRepliesReceived b, + txIdsReceived = txIdsReceived a + txIdsReceived b, + txMessagesSent = txMessagesSent a + txMessagesSent b, + txsRequested = txsRequested a + txsRequested b, + txRepliesReceived = txRepliesReceived a + txRepliesReceived b, + txsReceived = txsReceived a + txsReceived b, + txsOmitted = txsOmitted a + txsOmitted b, + lateBodies = lateBodies a + lateBodies b + } - txdPipelineTxIds :: !Bool, - -- ^ the tx-submission protocol only allows to pipeline `txid`'s requests - -- if we have non-acknowledged `txid`s. +instance Monoid TxSubmissionCounters where + mempty = TxSubmissionCounters { + txIdMessagesSent = 0, + txIdsRequested = 0, + txIdRepliesReceived = 0, + txIdsReceived = 0, + txMessagesSent = 0, + txsRequested = 0, + txRepliesReceived = 0, + txsReceived = 0, + txsOmitted = 0, + lateBodies = 0 + } - txdTxsToRequest :: !(Map txid SizeInBytes), - -- ^ txid's to download. +emptyPeerScore :: Time -> PeerScore +emptyPeerScore scoreTs = PeerScore { + peerScoreValue = 0, + peerScoreTs = scoreTs + } - txdTxsToMempool :: !(TxsToMempool txid tx) - -- ^ list of `tx`s to submit to the mempool. +-- | Per-peer protocol state. +-- +-- These are the pieces of state that naturally belong to the worker +-- thread handling one peer. Shared arbitration state such as peer +-- phase and peer score is kept separately in 'SharedPeerState'. 
+data PeerTxLocalState tx = PeerTxLocalState { + -- | Unacknowledged txids in the order advertised by the peer. + peerUnacknowledgedTxIds :: !(StrictSeq TxKey), + + -- | Txids this peer currently advertises and that may be requested from + -- it if the peer becomes the owner. + peerAvailableTxIds :: !(IntMap SizeInBytes), + + -- | Requested txids that have not yet been replied to. + peerRequestedTxs :: !IntSet, + peerRequestedTxBatches :: !(StrictSeq RequestedTxBatch), + peerRequestedTxsSize :: !SizeInBytes, + peerRequestedTxIds :: !NumTxIdsToReq, + + -- | Tx bodies downloaded from this peer and buffered locally until they + -- are either submitted to the mempool or superseded by shared-state + -- resolution. + peerDownloadedTxs :: !(IntMap tx) + } + deriving stock (Eq, Show, Generic) + +emptyPeerTxLocalState :: PeerTxLocalState tx +emptyPeerTxLocalState = PeerTxLocalState { + peerUnacknowledgedTxIds = StrictSeq.empty, + peerAvailableTxIds = IntMap.empty, + peerRequestedTxs = IntSet.empty, + peerRequestedTxBatches = StrictSeq.empty, + peerRequestedTxsSize = 0, + peerRequestedTxIds = 0, + peerDownloadedTxs = IntMap.empty } - deriving (Show, Eq) -instance (NFData txid, NFData tx) => NFData (TxDecision txid tx) where - -- all fields except `txdTxsToMempool` when evaluated to WHNF evaluate to NF. - rnf TxDecision {txdTxsToMempool} = rnf txdTxsToMempool +-- | Small shared view of peer state used for lease claiming and peer +-- selection. +data SharedPeerState = SharedPeerState { + sharedPeerPhase :: !PeerPhase, + sharedPeerScore :: !PeerScore, + sharedPeerGeneration :: !Word64, + sharedPeerRequestedTxBatches :: !Int, + sharedPeerRequestedTxsSize :: !SizeInBytes + } + deriving stock (Eq, Show, Generic) --- | A non-commutative semigroup instance. +-- | Shared V2 state. -- --- /note:/ this instance must be consistent with `pickTxsToDownload` and how --- `PeerTxState` is updated. It is designed to work with `TMergeVar`s. 
--- -instance Ord txid => Semigroup (TxDecision txid tx) where - TxDecision { txdTxIdsToAcknowledge, - txdTxIdsToRequest, - txdPipelineTxIds = _ignored, - txdTxsToRequest, - txdTxsToMempool } - <> - TxDecision { txdTxIdsToAcknowledge = txdTxIdsToAcknowledge', - txdTxIdsToRequest = txdTxIdsToRequest', - txdPipelineTxIds = txdPipelineTxIds', - txdTxsToRequest = txdTxsToRequest', - txdTxsToMempool = txdTxsToMempool' } - = - TxDecision { txdTxIdsToAcknowledge = txdTxIdsToAcknowledge + txdTxIdsToAcknowledge', - txdTxIdsToRequest = txdTxIdsToRequest + txdTxIdsToRequest', - txdPipelineTxIds = txdPipelineTxIds', - txdTxsToRequest = txdTxsToRequest <> txdTxsToRequest', - txdTxsToMempool = txdTxsToMempool <> txdTxsToMempool' - } - --- | A no-op decision. -emptyTxDecision :: TxDecision txid tx -emptyTxDecision = TxDecision { - txdTxIdsToAcknowledge = 0, - txdTxIdsToRequest = 0, - txdPipelineTxIds = False, - txdTxsToRequest = Map.empty, - txdTxsToMempool = mempty +-- There is no global decision thread. Peer worker threads coordinate by +-- atomically reading and updating this shared state. +data SharedTxState peeraddr txid = SharedTxState { + sharedPeers :: !(Map peeraddr SharedPeerState), + -- | Active unresolved txs that still participate in leasing, buffering, + -- submission and advertiser tracking. + sharedTxTable :: !(IntMap (TxEntry peeraddr)), + -- | Accepted txs retained locally for a bounded time so later txid + -- advertisements can be acked without re-requesting the body. 
+ sharedRetainedTxs :: !RetainedTxs, + sharedTxIdToKey :: !(Map txid TxKey), + sharedKeyToTxId :: !(IntMap txid), + sharedNextTxKey :: !Int, + sharedGeneration :: !Word64 + } + deriving stock (Eq, Show, Generic) + +type RetainedTxs = IntPSQ Time () + +emptySharedTxState :: SharedTxState peeraddr txid +emptySharedTxState = SharedTxState { + sharedPeers = Map.empty, + sharedTxTable = IntMap.empty, + sharedRetainedTxs = retainedEmpty, + sharedTxIdToKey = Map.empty, + sharedKeyToTxId = IntMap.empty, + sharedNextTxKey = 0, + sharedGeneration = 0 } +retainedEmpty :: RetainedTxs +retainedEmpty = IntPSQ.empty + +retainedSingleton :: Int -> Time -> RetainedTxs +retainedSingleton k retainUntil = + IntPSQ.insert k retainUntil () retainedEmpty + +retainedFromList :: [(Int, Time)] -> RetainedTxs +retainedFromList = + foldl' (\retained (k, retainUntil) -> retainedInsertMax k retainUntil retained) retainedEmpty + +retainedToList :: RetainedTxs -> [(Int, Time)] +retainedToList = + sortOn fst + . fmap (\(k, retainUntil, ()) -> (k, retainUntil)) + . 
IntPSQ.toList + +retainedSize :: RetainedTxs -> Int +retainedSize = IntPSQ.size + +retainedLookup :: Int -> RetainedTxs -> Maybe Time +retainedLookup k retained = + fmap fst (IntPSQ.lookup k retained) + +retainedMember :: Int -> RetainedTxs -> Bool +retainedMember k retained = + case IntPSQ.lookup k retained of + Just _ -> True + Nothing -> False + +retainedInsertMax :: Int -> Time -> RetainedTxs -> RetainedTxs +retainedInsertMax k retainUntil retained = + IntPSQ.insert k retainUntil' () retained + where + retainUntil' = + case retainedLookup k retained of + Just existing -> max existing retainUntil + Nothing -> retainUntil + +retainedDeleteKeys :: IntSet -> RetainedTxs -> RetainedTxs +retainedDeleteKeys keys retained = + IntSet.foldl' (flip IntPSQ.delete) retained keys + +retainedKeysSet :: RetainedTxs -> IntSet +retainedKeysSet = + IntPSQ.fold' (\k _ _ acc -> IntSet.insert k acc) IntSet.empty + +retainedRestrictKeys :: RetainedTxs -> IntSet -> RetainedTxs +retainedRestrictKeys retained keys = + IntPSQ.fold' keep retainedEmpty retained + where + keep k retainUntil _ + | IntSet.member k keys = IntPSQ.insert k retainUntil () + | otherwise = id + +retainedNextWake :: Time -> RetainedTxs -> Maybe Time +retainedNextWake currentTime = + go + where + go retained = + case IntPSQ.minView retained of + Just (_, retainUntil, (), retained') + | retainUntil > currentTime -> Just retainUntil + | otherwise -> go retained' + Nothing -> + Nothing + +retainedExpiredKeys :: Time -> RetainedTxs -> IntSet +retainedExpiredKeys currentTime = + go IntSet.empty + where + go expired retained = + case IntPSQ.minView retained of + Just (k, retainUntil, (), retained') + | retainUntil <= currentTime -> + go (IntSet.insert k expired) retained' + | otherwise -> + expired + Nothing -> + expired + +peerGenerationOf :: Ord peeraddr + => peeraddr + -> SharedTxState peeraddr txid + -> Word64 +peerGenerationOf peeraddr SharedTxState { sharedPeers } = + case Map.lookup peeraddr sharedPeers of + Just 
SharedPeerState { sharedPeerGeneration } -> sharedPeerGeneration + Nothing -> error "TxSubmission.V2.peerGenerationOf: missing peer" + +bumpIdlePeerGenerations :: Ord peeraddr + => Set.Set peeraddr + -> SharedTxState peeraddr txid + -> SharedTxState peeraddr txid +bumpIdlePeerGenerations peersToWake st@SharedTxState { sharedPeers } = + st { + sharedPeers = Map.mapWithKey bumpPeer sharedPeers + } + where + bumpPeer peeraddr sharedPeerState@SharedPeerState { sharedPeerPhase, sharedPeerGeneration } + | Set.member peeraddr peersToWake + , sharedPeerPhase == PeerIdle = + sharedPeerState { sharedPeerGeneration = sharedPeerGeneration + 1 } + | otherwise = sharedPeerState + +lookupTxKey :: Ord txid + => txid + -> SharedTxState peeraddr txid + -> Maybe TxKey +lookupTxKey txid SharedTxState { sharedTxIdToKey } = Map.lookup txid sharedTxIdToKey + +resolveTxKey :: SharedTxState peeraddr txid + -> TxKey + -> txid +resolveTxKey SharedTxState { sharedKeyToTxId } (TxKey k) = + case IntMap.lookup k sharedKeyToTxId of + Just txid -> txid + Nothing -> error "TxSubmission.V2.resolveTxKey: missing tx key" + +internTxId :: Ord txid + => txid + -> SharedTxState peeraddr txid + -> (TxKey, SharedTxState peeraddr txid) +internTxId txid st@SharedTxState { sharedTxIdToKey, sharedKeyToTxId, sharedNextTxKey } + | Just key <- Map.lookup txid sharedTxIdToKey = (key, st) + | otherwise = + let key = TxKey sharedNextTxKey + in ( key + , st { sharedTxIdToKey = Map.insert txid key sharedTxIdToKey + , sharedKeyToTxId = IntMap.insert sharedNextTxKey txid sharedKeyToTxId + , sharedNextTxKey = sharedNextTxKey + 1 + } + ) + +internTxIds :: (Foldable f, Ord txid) + => f txid + -> SharedTxState peeraddr txid + -> (Map txid TxKey, SharedTxState peeraddr txid) +internTxIds txids st0 = foldl' step (Map.empty, st0) txids + where + step (acc, st) txid = + let (key, st') = internTxId txid st + in (Map.insert txid key acc, st') + +-- | Flag to enable/disable the usage of the new tx-submission logic. 
+-- +data TxSubmissionLogicVersion = + -- | the legacy `Ouroboros.Network.TxSubmission.Inbound.V1` + TxSubmissionLogicV1 + -- | the new `Ouroboros.Network.TxSubmission.Inbound.V2` + | TxSubmissionLogicV2 + deriving (Eq, Show, Enum, Bounded) -- | TxLogic tracer. -- data TraceTxLogic peeraddr txid tx = - TraceSharedTxState String (SharedTxState peeraddr txid tx) - | TraceTxDecisions (Map peeraddr (TxDecision txid tx)) + TraceSharedTxState String (SharedTxState peeraddr txid) deriving Show @@ -380,6 +572,7 @@ data TraceTxSubmissionInbound txid tx = | TraceTxInboundAddedToMempool [txid] DiffTime | TraceTxInboundRejectedFromMempool [txid] DiffTime | TraceTxInboundError TxSubmissionProtocolError + | TraceTxInboundRequestTxs [txid] -- -- messages emitted by the new implementation of the server in @@ -389,47 +582,9 @@ data TraceTxSubmissionInbound txid tx = -- | Server received 'MsgDone' | TraceTxInboundTerminated - | TraceTxInboundDecision (TxDecision txid tx) deriving (Eq, Show) -data TxSubmissionCounters = - TxSubmissionCounters { - numOfOutstandingTxIds :: Int, - -- ^ txids which are not yet downloaded. This is a diff of keys sets of - -- `referenceCounts` and a sum of `bufferedTxs` and - -- `insubmissionToMempoolTxs` maps. - numOfBufferedTxs :: Int, - -- ^ number of all buffered txs (downloaded or not available) - numOfInSubmissionToMempoolTxs :: Int, - -- ^ number of all tx's which were submitted to the mempool - numOfTxIdsInflight :: Int - -- ^ number of all in-flight txid's. 
- } - deriving (Eq, Show) - -mkTxSubmissionCounters - :: Ord txid - => SharedTxState peeraddr txid tx - -> TxSubmissionCounters -mkTxSubmissionCounters - SharedTxState { - inflightTxs, - bufferedTxs, - referenceCounts, - inSubmissionToMempoolTxs - } - = - TxSubmissionCounters { - numOfOutstandingTxIds = Set.size $ Map.keysSet referenceCounts - Set.\\ Map.keysSet bufferedTxs - Set.\\ Map.keysSet inSubmissionToMempoolTxs, - numOfBufferedTxs = Map.size bufferedTxs, - numOfInSubmissionToMempoolTxs = Map.size inSubmissionToMempoolTxs, - numOfTxIdsInflight = getSum $ foldMap Sum inflightTxs - } - - data TxSubmissionProtocolError = ProtocolErrorTxNotRequested | ProtocolErrorTxIdsNotRequested diff --git a/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs b/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs index fdd077339aa..876d50979df 100644 --- a/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs +++ b/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs @@ -29,6 +29,7 @@ import Data.Bifunctor (first) import Data.Bool (bool) import Data.Foldable (toList) import Data.IP (fromHostAddress, fromHostAddress6) +import Data.IntMap.Strict qualified as IntMap import Data.Map.Strict qualified as Map import Data.Set qualified as Set import Data.Text (Text, pack) @@ -86,10 +87,12 @@ import Ouroboros.Network.Server qualified as Server import Ouroboros.Network.Server.RateLimiting (AcceptConnectionsPolicyTrace (..), AcceptedConnectionsLimit (..)) import Ouroboros.Network.Snocket (LocalAddress (..), RemoteAddress) -import Ouroboros.Network.TxSubmission.Inbound.V2 (ProcessedTxCount (..), - TraceTxLogic (..), TraceTxSubmissionInbound (..)) import Ouroboros.Network.TxSubmission.Inbound.V2.Types - (TxSubmissionLogicVersion (..)) + (ProcessedTxCount (..), SharedPeerState (..), + SharedTxState (..), TraceTxLogic (..), + TraceTxSubmissionInbound (..), TxAttemptState (..), TxEntry (..), + TxLease (..), 
TxSubmissionLogicVersion (..), retainedSize, + retainedToList) import Ouroboros.Network.TxSubmission.Outbound (TraceTxSubmissionOutbound (..)) -- Helper function for ToJSON instances with a "kind" field @@ -1769,6 +1772,11 @@ instance ( ToJSON txid , "txids" .= toJSON txids , "time" .= diffTime ] + toJSON (TraceTxInboundRequestTxs txids) = + object + [ "kind" .= String "TxInboundRequestTxs" + , "txids" .= toJSON txids + ] toJSON (TraceTxInboundError err) = object [ "kind" .= String "TxInboundError" @@ -1778,12 +1786,78 @@ instance ( ToJSON txid object [ "kind" .= String "TxInboundTerminated" ] - toJSON (TraceTxInboundDecision decision) = + +traceSharedTxStateToJSON + :: (Show addr, Show txid) + => SharedTxState addr txid + -> Value +traceSharedTxStateToJSON SharedTxState { + sharedPeers, + sharedTxTable, + sharedRetainedTxs, + sharedTxIdToKey, + sharedKeyToTxId, + sharedGeneration + } = object - [ "kind" .= String "TxInboundDecision" - -- TODO: this is too verbose, it will show full tx's - , "decision" .= String (pack $ show decision) + [ "sharedGeneration" .= sharedGeneration + , "peerCount" .= Map.size sharedPeers + , "activeTxCount" .= IntMap.size sharedTxTable + , "retainedTxCount" .= retainedSize sharedRetainedTxs + , "internedTxCount" .= Map.size sharedTxIdToKey + , "leasedTxCount" .= leasedTxCount + , "claimableTxCount" .= claimableTxCount + , "resolvedTxCount" .= resolvedTxCount + , "downloadingAttemptCount" .= downloadingAttemptCount + , "bufferedAttemptCount" .= bufferedAttemptCount + , "submittingAttemptCount" .= submittingAttemptCount + , "peerPhases" .= peerPhases + , "sharedPeers" .= [ (show peeraddr, show peerState) + | (peeraddr, peerState) <- Map.toList sharedPeers + ] + , "sharedTxTable" .= [ (renderTxId txKey, show txEntry) + | (txKey, txEntry) <- IntMap.toList sharedTxTable + ] + , "sharedRetainedTxs" .= [ (renderTxId txKey, show retainUntil) + | (txKey, retainUntil) <- retainedToList sharedRetainedTxs + ] + , "internedTxIds" .= fmap show 
(Map.keys sharedTxIdToKey) ] + where + activeEntries = IntMap.elems sharedTxTable + + leasedTxCount = + length [ () | TxEntry { txLease = TxLeased _ _ } <- activeEntries ] + + claimableTxCount = + length [ () | TxEntry { txLease = TxClaimable } <- activeEntries ] + + resolvedTxCount = 0 :: Int + + downloadingAttemptCount = + sum [ length [ () | TxDownloading <- Map.elems txAttempts ] + | TxEntry { txAttempts } <- activeEntries + ] + + bufferedAttemptCount = + sum [ length [ () | TxBuffered <- Map.elems txAttempts ] + | TxEntry { txAttempts } <- activeEntries + ] + + submittingAttemptCount = + sum [ length [ () | TxSubmitting <- Map.elems txAttempts ] + | TxEntry { txAttempts } <- activeEntries + ] + + peerPhases = + Map.toList $ + Map.fromListWith (+) + [ (show sharedPeerPhase, 1 :: Int) + | SharedPeerState { sharedPeerPhase } <- Map.elems sharedPeers + ] + + renderTxId txKey = + maybe "" show (IntMap.lookup txKey sharedKeyToTxId) -- TODO: in cardano-node in the `coot/tx-submission-10.5` branch there's -- a better instance. @@ -1795,11 +1869,7 @@ instance ( Show addr toJSON (TraceSharedTxState tag st) = object [ "kind" .= String "SharedTxState" , "tag" .= String (pack tag) - , "sharedTxState" .= String (pack . show $ st) - ] - toJSON (TraceTxDecisions decisions) = - object [ "kind" .= String "TxDecisions" - , "decisions" .= String (pack . 
show $ decisions) + , "sharedTxState" .= traceSharedTxStateToJSON st ] diff --git a/ouroboros-network/ouroboros-network.cabal b/ouroboros-network/ouroboros-network.cabal index 38e45f56e93..023e28d2952 100644 --- a/ouroboros-network/ouroboros-network.cabal +++ b/ouroboros-network/ouroboros-network.cabal @@ -253,7 +253,6 @@ library Ouroboros.Network.PeerSharing Ouroboros.Network.TxSubmission.Inbound.V1 Ouroboros.Network.TxSubmission.Inbound.V2 - Ouroboros.Network.TxSubmission.Inbound.V2.Decision Ouroboros.Network.TxSubmission.Inbound.V2.Policy Ouroboros.Network.TxSubmission.Inbound.V2.Registry Ouroboros.Network.TxSubmission.Inbound.V2.State @@ -310,7 +309,7 @@ library dlist, dns, hashable, - io-classes:{io-classes, mtl, si-timers, strict-mvar, strict-stm} ^>=1.8 || ^>= 1.9, + io-classes:{io-classes, mtl, si-timers, strict-stm} ^>=1.8 || ^>= 1.9, iproute, monoidal-synchronisation, mtl, @@ -658,7 +657,6 @@ library tracing aeson, base >=4.14 && <4.23, containers, - io-classes:si-timers, iproute, ouroboros-network:{ouroboros-network, orphan-instances, protocols}, text, @@ -928,7 +926,7 @@ library ouroboros-network-tests-lib deepseq, dns, hashable, - io-classes:{io-classes, si-timers, strict-mvar, strict-stm}, + io-classes:{io-classes, si-timers, strict-stm}, io-sim, iproute, monoidal-synchronisation, diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/Diffusion/Node.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/Diffusion/Node.hs index 47c8080dbb7..2f4e42c88b5 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/Diffusion/Node.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/Diffusion/Node.hs @@ -110,7 +110,6 @@ import Ouroboros.Network.Snocket (MakeBearer, Snocket, TestAddress (..), invalidFileDescriptor) import Ouroboros.Network.TxSubmission.Inbound.V2.Policy (TxDecisionPolicy) -import Ouroboros.Network.TxSubmission.Inbound.V2.Registry (decisionLogicThreads) import Ouroboros.Network.TxSubmission.Inbound.V2.Types (TraceTxLogic) 
import Simulation.Network.Snocket (AddressType (..), FD) @@ -126,7 +125,6 @@ import Test.Ouroboros.Network.PeerSelection.RootPeersDNS (DNSLookupDelay, DNSTimeout, DomainAccessPoint (..), MockDNSLookupResult, mockDNSActions) import Test.Ouroboros.Network.TxSubmission.Types (Tx) -import Test.Ouroboros.Network.Utils @@ -255,7 +253,7 @@ run blockGeneratorArgs ni na extraPeersAPI psArgs toExtraPeers requestPublicRootPeers peerChurnGovernor tracers tracerBlockFetch - tracerTxLogic mkApps = do + _tracerTxLogic mkApps = do labelThisThread ("node-" ++ Node.ppNtNAddr (aIPAddress na)) Node.withNodeKernelThread (aIPAddress na) blockGeneratorArgs (aTxs na) $ \ nodeKernel nodeKernelThread -> do @@ -340,17 +338,9 @@ run blockGeneratorArgs ni na (mkApps nodeKernel keepAliveStdGen)) $ \ diffusionThread -> withAsync (blockFetch nodeKernel) $ \blockFetchLogicThread -> - - withAsync (decisionLogicThreads - tracerTxLogic - sayTracer - (aTxDecisionPolicy na) - (nkTxChannelsVar nodeKernel) - (nkSharedTxStateVar nodeKernel)) $ \decLogicThread -> - wait diffusionThread - <> wait blockFetchLogicThread - <> wait nodeKernelThread - <> wait decLogicThread + wait diffusionThread + <> wait blockFetchLogicThread + <> wait nodeKernelThread where blockFetch :: NodeKernel BlockHeader Block s txid m -> m Void diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/Diffusion/Node/Kernel.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/Diffusion/Node/Kernel.hs index e0301c7149d..51c765cb4bb 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/Diffusion/Node/Kernel.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/Diffusion/Node/Kernel.hs @@ -33,7 +33,6 @@ module Test.Ouroboros.Network.Diffusion.Node.Kernel ) where import Control.Applicative (Alternative) -import Control.Concurrent.Class.MonadMVar.Strict qualified as Strict import Control.Concurrent.Class.MonadSTM qualified as LazySTM import Control.Concurrent.Class.MonadSTM.Strict import Control.DeepSeq (NFData (..)) 
@@ -85,8 +84,9 @@ import Ouroboros.Network.PeerSharing (PeerSharingAPI, PeerSharingRegistry (..), import Ouroboros.Network.Protocol.Handshake.Unversioned import Ouroboros.Network.Snocket (TestAddress (..)) import Ouroboros.Network.TxSubmission.Inbound.V2.Registry (SharedTxStateVar, - TxChannels (..), TxChannelsVar, TxMempoolSem, newSharedTxStateVar, - newTxMempoolSem) + TxSubmissionCountersVar, newSharedTxStateVar, + newTxSubmissionCountersVar) +import Ouroboros.Network.TxSubmission.Inbound.V2.Types (emptySharedTxState) import Test.Ouroboros.Network.Diffusion.Node.ChainDB (ChainDB (..)) import Test.Ouroboros.Network.Diffusion.Node.ChainDB qualified as ChainDB @@ -318,19 +318,15 @@ data NodeKernel header block s txid m = NodeKernel { nkMempool :: Mempool m txid (Tx txid), - nkTxChannelsVar - :: TxChannelsVar m NtNAddr txid (Tx txid), - - nkTxMempoolSem - :: TxMempoolSem m, + nkTxCountersVar + :: TxSubmissionCountersVar m, nkSharedTxStateVar - :: SharedTxStateVar m NtNAddr txid (Tx txid) + :: SharedTxStateVar m NtNAddr txid } newNodeKernel :: ( MonadTraceSTM m , MonadLabelledSTM m - , Strict.MonadMVar m , RandomGen rng , Ord txid , Eq txid @@ -339,7 +335,7 @@ newNodeKernel :: ( MonadTraceSTM m -> Int -> [Tx txid] -> m (NodeKernel header block rng txid m) -newNodeKernel psRng txSeed txs = do +newNodeKernel psRng _txSeed txs = do publicStateVar <- makePublicPeerSelectionStateVar labelTVarIO publicStateVar "public-peer-selection-state-var" traceTVarIO publicStateVar (\_ a -> return $ TraceString (show a)) @@ -354,9 +350,8 @@ newNodeKernel psRng txSeed txs = do ps_POLICY_PEER_SHARE_MAX_PEERS <*> pure publicStateVar <*> newMempool txs - <*> Strict.newMVar (TxChannels Map.empty) - <*> newTxMempoolSem - <*> newSharedTxStateVar (Random.mkStdGen txSeed) + <*> newTxSubmissionCountersVar mempty + <*> newSharedTxStateVar emptySharedTxState -- | Register a new upstream chain-sync client. 
-- @@ -434,7 +429,6 @@ withNodeKernelThread , MonadThrow (STM m) , MonadTraceSTM m , MonadLabelledSTM m - , Strict.MonadMVar m , HasFullHeader block , RandomGen seed , SplitGen seed diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs index b33d29b9bb8..198ec8cee7b 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs @@ -19,7 +19,6 @@ import Prelude hiding (seq) import NoThunks.Class -import Control.Concurrent.Class.MonadMVar.Strict import Control.Concurrent.Class.MonadSTM qualified as Lazy import Control.Concurrent.Class.MonadSTM.Strict import Control.Monad.Class.MonadAsync @@ -35,7 +34,6 @@ import Control.Tracer (Tracer (..), contramap) import Data.ByteString.Lazy qualified as BSL import Data.Foldable (traverse_) import Data.Function (on) -import Data.Hashable import Data.List (nubBy) import Data.List qualified as List import Data.List.Trace qualified as Trace @@ -46,7 +44,6 @@ import Data.Maybe (fromMaybe, isJust) import Data.Monoid (Sum (..)) import Data.Set qualified as Set import Data.Typeable (Typeable) -import System.Random (mkStdGen) import Ouroboros.Network.Channel import Ouroboros.Network.ControlMessage (ControlMessage (..), ControlMessageSTM) @@ -128,7 +125,7 @@ instance Arbitrary TxSubmissionState where newtype TxStateTrace peeraddr txid = - TxStateTrace (SharedTxState peeraddr txid (Tx txid)) + TxStateTrace (SharedTxState peeraddr txid) type TxStateTraceType = TxStateTrace PeerAddr TxId @@ -139,7 +136,6 @@ runTxSubmission , MonadEvaluate m , MonadFork m , MonadMask m - , MonadMVar m , MonadSay m , MonadST m , MonadLabelledSTM m @@ -156,7 +152,6 @@ runTxSubmission , Typeable txid , Show peeraddr , Ord peeraddr - , Hashable peeraddr , Typeable peeraddr , txid ~ Int @@ -171,86 +166,72 @@ runTxSubmission -> TxDecisionPolicy -> m ([Tx 
txid], [[Tx txid]]) -- ^ inbound and outbound mempools -runTxSubmission tracer tracerTxLogic st0 txDecisionPolicy = do +runTxSubmission tracer _tracerTxLogic st0 txDecisionPolicy = do st <- traverse (\(b, c, d, e) -> do mempool <- newMempool b (outChannel, inChannel) <- createConnectedChannels return (mempool, c, d, e, outChannel, inChannel) ) st0 inboundMempool <- emptyMempool - let txRng = mkStdGen 42 -- TODO - txMap = Map.fromList [ (getTxId tx, tx) + let txMap = Map.fromList [ (getTxId tx, tx) | (txs, _, _, _) <- Map.elems st0 , tx <- txs] - - txChannelsVar <- newMVar (TxChannels Map.empty) - txMempoolSem <- newTxMempoolSem duplicateTxIdsVar <- Lazy.newTVarIO [] - sharedTxStateVar <- newSharedTxStateVar txRng + sharedTxStateVar <- newSharedTxStateVar emptySharedTxState + txCountersVar <- newTxSubmissionCountersVar mempty traceTVarIO sharedTxStateVar \_ -> return . TraceDynamic . TxStateTrace labelTVarIO sharedTxStateVar "shared-tx-state" - withAsync (decisionLogicThreads tracerTxLogic sayTracer - txDecisionPolicy txChannelsVar sharedTxStateVar) $ \a -> do - -- Construct txSubmission outbound client - let clients = (\(addr, (mempool {- txs -}, ctrlMsgSTM, outDelay, _, outChannel, _)) -> do - let client = txSubmissionOutbound - (Tracer $ say . show) - (NumTxIdsToAck $ getNumTxIdsToReq - $ maxUnacknowledgedTxIds txDecisionPolicy) - (getMempoolReader mempool) - (maxBound :: TestVersion) - ctrlMsgSTM - runPeerWithLimits (("OUTBOUND " ++ show addr,) `contramap` tracer) - txSubmissionCodec2 - (byteLimitsTxSubmission2 (fromIntegral . 
BSL.length)) - timeLimitsTxSubmission2 - (maybe id delayChannel outDelay outChannel) - (txSubmissionClientPeer client) - ) - <$> Map.assocs st - - -- Construct txSubmission inbound server - servers = (\(addr, (_, _, _, inDelay, _, inChannel)) -> - withPeer tracerTxLogic - txChannelsVar - txMempoolSem - txDecisionPolicy - sharedTxStateVar - (getMempoolReader inboundMempool) - (getMempoolWriter duplicateTxIdsVar inboundMempool) - getTxSize - addr $ \api -> do - let server = - txSubmissionInboundV2 sayTracer --verboseTracer - NoTxSubmissionInitDelay - (getMempoolWriter duplicateTxIdsVar - inboundMempool) - api - runPipelinedPeerWithLimits - - (("INBOUND " ++ show addr,) `contramap` sayTracer) - txSubmissionCodec2 - (byteLimitsTxSubmission2 (fromIntegral . BSL.length)) - timeLimitsTxSubmission2 - (maybe id delayChannel inDelay inChannel) - (txSubmissionServerPeerPipelined server) - ) <$> Map.assocs st - - -- Run clients and servers - withAsyncAll (zip clients servers) $ \as -> do - _ <- waitAllServers as - -- cancel decision logic thread - cancel a - - inmp <- readMempool inboundMempool - dupTxIds <- Lazy.readTVarIO duplicateTxIdsVar - let outmp = map (\(txs, _, _, _) -> txs) - $ Map.elems st0 - dupTxs = [ txMap Map.! txid | txid <- dupTxIds] - - return (inmp <> dupTxs, outmp) + let clients = (\(addr, (mempool {- txs -}, ctrlMsgSTM, outDelay, _, outChannel, _)) -> do + let client = txSubmissionOutbound + (Tracer $ say . show) + (NumTxIdsToAck $ getNumTxIdsToReq + $ maxUnacknowledgedTxIds txDecisionPolicy) + (getMempoolReader mempool) + (maxBound :: TestVersion) + ctrlMsgSTM + runPeerWithLimits (("OUTBOUND " ++ show addr,) `contramap` tracer) + txSubmissionCodec2 + (byteLimitsTxSubmission2 (fromIntegral . 
BSL.length)) + timeLimitsTxSubmission2 + (maybe id delayChannel outDelay outChannel) + (txSubmissionClientPeer client) + ) + <$> Map.assocs st + + servers = (\(addr, (_, _, _, inDelay, _, inChannel)) -> + withPeer txDecisionPolicy + (getMempoolReader inboundMempool) + sharedTxStateVar + txCountersVar + addr $ \api -> do + let server = + txSubmissionInboundV2 sayTracer + NoTxSubmissionInitDelay + (getMempoolReader inboundMempool) + (getMempoolWriter duplicateTxIdsVar inboundMempool) + getTxSize + api + runPipelinedPeerWithLimits + (("INBOUND " ++ show addr,) `contramap` sayTracer) + txSubmissionCodec2 + (byteLimitsTxSubmission2 (fromIntegral . BSL.length)) + timeLimitsTxSubmission2 + (maybe id delayChannel inDelay inChannel) + (txSubmissionServerPeerPipelined server) + ) <$> Map.assocs st + + withAsyncAll (zip clients servers) $ \as -> do + _ <- waitAllServers as + + inmp <- readMempool inboundMempool + dupTxIds <- Lazy.readTVarIO duplicateTxIdsVar + let outmp = map (\(txs, _, _, _) -> txs) + $ Map.elems st0 + dupTxs = [ txMap Map.! txid | txid <- dupTxIds] + + return (inmp <> dupTxs, outmp) where waitAllServers :: [(Async m x, Async m x)] -> m [Either SomeException x] waitAllServers [] = return [] @@ -479,28 +460,17 @@ prop_sharedTxStateInvariant initialState@(TxSubmissionState st0 _) = . counterexample (show err) $ False Right _ -> - let lookBack, tr' :: [TxStateTraceType] - lookBack = Trace.toList $ traceSelectTraceEventsDynamic tr - tr' = drop 1 lookBack + let tracedStates :: [TxStateTraceType] + tracedStates = Trace.toList $ traceSelectTraceEventsDynamic tr in counterexample pTrace case foldMap (\case - (TxStateTrace stBack, TxStateTrace st)-> - (Every . counterexample (show st) $ - sharedTxStateInvariant WeakInvariant st - .&&. 
let inflight = Map.keysSet $ inflightTxs st - buffered = Map.keysSet $ bufferedTxs st - inflightBack = Map.keysSet $ inflightTxs stBack - in - -- here we account for a very slow peer from whom we requested - -- a transaction, but it didn't arrive until we have also requested - -- it from another peer, received it, and placed it into the mempool, - -- and so it ended up in bufferedTxs when the first one is still - -- in flight. It is an error when the opposite happens. - null $ (inflight Set.\\ inflightBack) `Set.intersection` buffered - , Sum 1 - ) + TxStateTrace st -> + ( Every . counterexample (show st) $ + sharedTxStateInvariant WeakInvariant st + , Sum 1 + ) ) - (zip lookBack tr') + tracedStates of (p, Sum c) -> label ("number of txs: " ++ @@ -513,7 +483,6 @@ prop_sharedTxStateInvariant initialState@(TxSubmissionState st0 _) = ++ renderRanges 100 c) $ p - -- -- Utils -- diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index ff1e58eb561..f36787129d0 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -1,125 +1,75 @@ -{-# LANGUAGE BangPatterns #-} -{-# LANGUAGE BlockArguments #-} -{-# LANGUAGE DataKinds #-} -{-# LANGUAGE DerivingVia #-} -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE GADTs #-} -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE NamedFieldPuns #-} -{-# LANGUAGE PackageImports #-} -{-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE StandaloneDeriving #-} -{-# LANGUAGE TupleSections #-} - -{-# LANGUAGE CPP #-} - -{-# OPTIONS_GHC -Wno-orphans #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE RankNTypes #-} module Test.Ouroboros.Network.TxSubmission.TxLogic ( tests , ArbTxDecisionPolicy (..) , PeerAddr + , ArbSharedTxState (..) + , ArbSharedPeerState (..) + , ArbPeerTxLocalState (..) 
, sharedTxStateInvariant , InvariantStrength (..) - -- * Utils - , mkDecisionContext ) where -import Prelude hiding (seq) - -import Control.Exception (assert) -import Control.Monad.Class.MonadTime.SI (Time (..)) - -import Data.Foldable as Foldable (fold, foldl', toList) -import Data.List (intercalate, isPrefixOf, isSuffixOf, mapAccumR, nub, - stripPrefix) -import Data.Map.Merge.Strict qualified as Map -import Data.Map.Strict (Map) +import Control.Monad.Class.MonadTime.SI (Time (..), addTime, diffTime) +import Data.Foldable (foldl', toList) +import Data.Function (on) +import Data.IntMap.Strict qualified as IntMap +import Data.IntSet qualified as IntSet +import Data.List (nub, nubBy) import Data.Map.Strict qualified as Map -import Data.Maybe (fromMaybe) -import Data.Monoid (Sum (..)) import Data.Sequence.Strict qualified as StrictSeq -import Data.Set (Set) import Data.Set qualified as Set -import Data.Typeable -import System.Random (StdGen, mkStdGen) -import System.Random.SplitMix (SMGen) - -import NoThunks.Class +import Data.Word (Word64) import Ouroboros.Network.Protocol.TxSubmission2.Type -import Ouroboros.Network.TxSubmission.Inbound.V2.Decision (TxDecision (..)) -import Ouroboros.Network.TxSubmission.Inbound.V2.Decision qualified as TXS import Ouroboros.Network.TxSubmission.Inbound.V2.Policy -import Ouroboros.Network.TxSubmission.Inbound.V2.State (PeerTxState (..), - SharedTxState (..)) -import Ouroboros.Network.TxSubmission.Inbound.V2.State qualified as TXS -import Ouroboros.Network.TxSubmission.Inbound.V2.Types qualified as TXS +import Ouroboros.Network.TxSubmission.Inbound.V2.Registry + (updatePeerPhase, updatePeerRequestedTxs) +import Ouroboros.Network.TxSubmission.Inbound.V2.State +import Ouroboros.Network.TxSubmission.Inbound.V2.Types import Test.Ouroboros.Network.TxSubmission.Types import Test.QuickCheck -import Test.QuickCheck.Function (apply) -import Test.QuickCheck.Gen (Gen (..)) -import Test.QuickCheck.Random (QCGen (..)) -#if 
!MIN_VERSION_QuickCheck(2,16,0) -import "quickcheck-monoids" Test.QuickCheck.Monoids -#endif import Test.Tasty (TestTree, testGroup) +import Test.Tasty.HUnit (Assertion, assertBool, assertFailure, testCaseSteps, + (@?=)) import Test.Tasty.QuickCheck (testProperty) -import Text.Pretty.Simple - tests :: TestTree -tests = testGroup "AppV2" - [ testGroup "TxLogic" - [ testGroup "State" - [ testGroup "Arbitrary" - [ testGroup "ArbSharedTxState" - [ testProperty "generator" prop_SharedTxState_generator - , testProperty "shrinker" $ withMaxSuccess 10 - prop_SharedTxState_shrinker - , testProperty "nothunks" prop_SharedTxState_nothunks - ] - , testGroup "ArbReceivedTxIds" - [ testProperty "generator" prop_receivedTxIds_generator - ] - , testGroup "ArbCollectTxs" - [ testProperty "generator" prop_collectTxs_generator - , testProperty "shrinker" $ withMaxSuccess 10 - prop_collectTxs_shrinker - ] - ] - , testProperty "acknowledgeTxIds" prop_acknowledgeTxIds - , testProperty "receivedTxIdsImpl" prop_receivedTxIdsImpl - , testProperty "collectTxsImpl" prop_collectTxsImpl - , testProperty "splitAcknowledgedTxIds" prop_splitAcknowledgedTxIds - , testGroup "NoThunks" - [ testProperty "receivedTxIdsImpl" prop_receivedTxIdsImpl_nothunks - , testProperty "collectTxsImpl" prop_collectTxsImpl_nothunks - ] - ] - , testGroup "Decisions" - [ testGroup "ArbDecisionContexts" - [ testProperty "generator" prop_ArbDecisionContexts_generator - , testProperty "shrinker" $ withMaxSuccess 33 - prop_ArbDecisionContexts_shrinker - ] - , testProperty "shared state invariant" prop_makeDecisions_sharedstate - , testProperty "inflight" prop_makeDecisions_inflight - , testProperty "policy" prop_makeDecisions_policy - , testProperty "acknowledged" prop_makeDecisions_acknowledged - , testProperty "exhaustive" prop_makeDecisions_exhaustive - ] - , testGroup "Registry" - [ testGroup "filterActivePeers" - [ testProperty "not limiting decisions" prop_filterActivePeers_not_limitting_decisions - ] - ] +tests = + 
testGroup "TxLogic" + [ testProperty "handleReceivedTxIds inserts new tx entries" prop_handleReceivedTxIds_newEntries + , testProperty "handleReceivedTxIds resolves txids already in mempool" prop_handleReceivedTxIds_knownToMempool + , testProperty "handleReceivedTxs buffers received and drops omitted txs" prop_handleReceivedTxs_buffersAndDropsOmitted + , testProperty "handleReceivedTxs drops late bodies already retained or in mempool" prop_handleReceivedTxs_dropsLateBodies + , testProperty "handleReceivedTxs penalizes omitted txs after full prune" prop_handleReceivedTxs_penalizesOmittedAfterPrune + , testProperty "handleSubmittedTxs retains accepted and drops rejected" prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected + , testProperty "nextPeerAction prioritises submitting buffered owned txs" prop_nextPeerAction_prioritisesSubmit + , testProperty "nextPeerAction claims claimable tx for best idle advertiser" prop_nextPeerAction_claimsClaimableTx + , testProperty "nextPeerAction steals expired lease for best idle advertiser" prop_nextPeerAction_claimsExpiredLease + , testProperty "nextPeerAction requests an oversized first tx within the soft budget" prop_nextPeerAction_requestsOversizedFirstTx + , testCaseSteps "nextPeerAction skips blocked available txs and requests later claimable ones" unit_nextPeerAction_skipsBlockedAvailableTxs + , testProperty "nextPeerAction submits buffered owned txs before acking" prop_nextPeerAction_ownerSubmitsBuffered + , testCaseSteps "nextPeerAction requests other txs despite a blocked buffered tx" unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx + , testCaseSteps "nextPeerAction only acks the safe prefix before a blocked buffered tx" unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx + , testProperty "nextPeerAction keeps non-owner txids unacked until resolved" prop_nextPeerAction_nonOwnerWaitsUntilResolved + , testProperty "nextPeerActionPipelined suppresses ack-only txid requests" 
prop_nextPeerActionPipelined_requiresAckAndReq + , testProperty "nextPeerActionPipelined requests txids when it can ack and request" prop_nextPeerActionPipelined_requestsTxIds + , testProperty "nextPeerActionPipelined opens a second outstanding body batch" prop_nextPeerActionPipelined_secondBodyBatch + , testProperty "nextPeerActionPipelined does not open a third outstanding body batch" prop_nextPeerActionPipelined_noThirdBodyBatch + , testProperty "nextPeerAction prunes expired retained txs" prop_nextPeerAction_prunesExpiredRetained + , testProperty "nextPeerAction keeps retained txs before expiry" prop_nextPeerAction_keepsRetained + , testProperty "PeerDoNothing waits for the earliest shared expiry" prop_nextPeerAction_earliestWakeDelay + , testProperty "PeerDoNothing uses the current peer generation" prop_nextPeerAction_returnsPeerGeneration + , testProperty "handleSubmittedTxs bumps idle advertiser generations" prop_handleSubmittedTxs_bumpsIdleAdvertisers + , testCaseSteps "updatePeerPhase only wakes the peer becoming idle" unit_updatePeerPhase_wakesOnlyBecomingIdlePeer + , testCaseSteps "updatePeerPhase wakes competing idle advertisers when a peer leaves idle" unit_updatePeerPhase_wakesCompetingAdvertisers + , testCaseSteps "updatePeerRequestedTxs does not wake advertisers when the owner becomes saturated" unit_updatePeerRequestedTxs_ignoresSaturation + , testCaseSteps "updatePeerRequestedTxs ignores unsaturated inflight changes" unit_updatePeerRequestedTxs_ignoresUnsaturatedChanges ] - ] - -- -- InboundState properties @@ -129,1494 +79,2033 @@ type PeerAddr = Int data InvariantStrength = WeakInvariant | StrongInvariant + deriving (Eq, Show) -- | 'InboundState` invariant. -- sharedTxStateInvariant - :: forall peeraddr txid tx. - ( Ord txid + :: forall peeraddr txid. 
+ ( Ord peeraddr + , Ord txid + , Show peeraddr , Show txid - , Show tx ) => InvariantStrength - -> SharedTxState peeraddr txid tx + -> SharedTxState peeraddr txid -> Property -sharedTxStateInvariant invariantStrength - SharedTxState { - peerTxStates, - inflightTxs, - bufferedTxs, - referenceCounts, - timedTxs - } = - counterexample "bufferedTxs txid not a subset of unacknowledged txids" - ( - let unacknowledgedSet = - foldr (\PeerTxState { unacknowledgedTxIds } r -> - r <> Set.fromList (toList unacknowledgedTxIds)) - Set.empty txStates - timedSet = foldMap Set.fromList timedTxs - in case invariantStrength of - WeakInvariant -> - -- `submitTxToMempool` caches buffered `txs`, we check here that - -- they do not leak - counterexample ("unacknowledgedSet: " ++ show unacknowledgedSet) $ - counterexample ("bufferedTxsSet: " ++ show bufferedTxsSet) $ - counterexample ("timedTxsSet: " ++ show timedSet) $ - (bufferedTxsSet Set.\\ unacknowledgedSet) - `Set.isSubsetOf` - timedSet - - StrongInvariant -> property $ - -- the set of buffered txids must be a subset of sum of the sets of - -- unacknowledged txids - bufferedTxsSet - `Set.isSubsetOf` - unacknowledgedSet - ) - - .&&. counterexample "referenceCounts invariant violation" - ( - referenceCounts - === - -- fold unacknowledgedTxIds - Foldable.foldl' - (\m PeerTxState { unacknowledgedTxIds = unacked } -> - Foldable.foldl' - (flip $ - Map.alter (\case - Nothing -> Just $! 1 - Just cnt -> Just $! succ cnt) - ) - m - unacked - ) - -- fold timedTxs - (Foldable.foldl' - (Foldable.foldl' - (flip $ - Map.alter (\case - Nothing -> Just $! 1 - Just cnt -> Just $! succ cnt) - ) - ) - Map.empty - timedTxs - ) - txStates - ) - - .&&. counterexample ("bufferedTxs contain tx which should be gc-ed: " - ++ show (Map.keysSet bufferedTxs `Set.difference` liveSet)) - (Map.keysSet bufferedTxs `Set.isSubsetOf` liveSet) - - .&&. 
counterexample "inflightTxs must be a sum of requestedTxInflight sets" - (inflightTxs - === - foldr (\PeerTxState { requestedTxsInflight } m -> - Map.unionWith (+) (Map.fromSet (\_ -> 1) requestedTxsInflight) m) - Map.empty - peerTxStates) - - -- PeerTxState invariants - .&&. counterexample "PeerTxState invariant violation" - (foldMap (\ps -> Every - . counterexample (show ps) - . peerTxStateInvariant - $ ps - ) - peerTxStates) - +sharedTxStateInvariant strength SharedTxState { + sharedPeers, + sharedTxTable, + sharedRetainedTxs, + sharedTxIdToKey, + sharedKeyToTxId, + sharedNextTxKey + } = + conjoin $ + [ counterexample "sharedTxIdToKey/sharedKeyToTxId size mismatch" + (Map.size sharedTxIdToKey === IntMap.size sharedKeyToTxId) + , counterexample "active and retained tx sets overlap" + (IntSet.null (IntMap.keysSet sharedTxTable `IntSet.intersection` retainedKeysSet sharedRetainedTxs)) + , counterexample "tx-key maps disagree" + (property (keysRoundTripForward && keysRoundTripBackward)) + , counterexample "tx-key maps do not track exactly the live tx keys" + (IntMap.keysSet sharedKeyToTxId === liveKeys) + , counterexample "sharedNextTxKey does not stay ahead of all live tx keys" + (property (all (< sharedNextTxKey) (IntSet.toList liveKeys))) + ] + ++ case strength of + WeakInvariant -> + fmap checkTxEntry activeEntries + StrongInvariant -> + fmap checkTxEntry activeEntries + ++ [ counterexample "active tx entry without any liveness source" + (property (all activeEntryLive (IntMap.elems sharedTxTable))) + ] where - peerTxStateInvariant :: PeerTxState txid tx -> Property - peerTxStateInvariant PeerTxState { availableTxIds, - unacknowledgedTxIds, - unknownTxs, - requestedTxIdsInflight, - requestedTxsInflight, - requestedTxsInflightSize } = - - - counterexample ("unknownTxs is not a subset of unacknowledgedTxIds: " - ++ show (unknownTxs Set.\\ unacknowledgedTxIdsSet)) - (unknownTxs `Set.isSubsetOf` unacknowledgedTxIdsSet) - - .&&. 
counterexample ("availableTxs is not a subset of unacknowledgedTxIds: " - ++ show (availableTxIdsSet Set.\\ unacknowledgedTxIdsSet)) - (availableTxIdsSet `Set.isSubsetOf` unacknowledgedTxIdsSet) - - .&&. counterexample ("unacknowledged tx must be either available, unknown or buffered: " - ++ show (unacknowledgedTxIdsSet - Set.\\ availableTxIdsSet - Set.\\ unknownTxs - Set.\\ bufferedTxsSet - Set.\\ downloadedTxsSet)) - (unacknowledgedTxIdsSet - Set.\\ availableTxIdsSet - Set.\\ unknownTxs - Set.\\ downloadedTxsSet - `Set.isSubsetOf` - bufferedTxsSet - ) - - .&&. counterexample "requestedTxIdsInflight invariant violation" - (requestedTxIdsInflight >= 0) - - -- a requested tx is either available or buffered - .&&. counterexample ("requestedTxsInflight invariant violation: " - ++ show (requestedTxsInflight - Set.\\ availableTxIdsSet - Set.\\ bufferedTxsSet)) - (requestedTxsInflight Set.\\ availableTxIdsSet `Set.isSubsetOf` bufferedTxsSet) - - .&&. counterexample "requestedTxsInfightSize" - (requestedTxsInflightSize - === - fold (availableTxIds `Map.restrictKeys` requestedTxsInflight)) - - where - availableTxIdsSet :: Set txid - availableTxIdsSet = Map.keysSet availableTxIds - - unacknowledgedTxIdsSet :: Set txid - unacknowledgedTxIdsSet = Set.fromList (toList unacknowledgedTxIds) - - downloadedTxsSet :: Set txid - downloadedTxsSet = Set.unions $ map (Map.keysSet . downloadedTxs) txStates - - bufferedTxsSet = Map.keysSet bufferedTxs :: Set txid - liveSet = Map.keysSet referenceCounts :: Set txid - txStates = Map.elems peerTxStates :: [PeerTxState txid tx] - --- --- Generate `InboundState` --- - --- | PeerTxState generator. --- --- `mkArbPeerTxState` is the smart constructor. 
--- -data ArbPeerTxState txid tx = - ArbPeerTxState { arbPeerTxState :: PeerTxState txid tx, - arbInflightSet :: Set tx, - -- ^ in-flight txs - arbBufferedMap :: Map txid (Maybe tx) - } - -data TxStatus = Available | Inflight | Unknown - -instance Arbitrary TxStatus where - arbitrary = oneof [ pure Available - , pure Inflight - , pure Unknown - ] - -data TxMask tx = TxAvailable tx TxStatus - -- ^ available txid with its size, the Bool indicates if it's - -- in-flight or not - | TxBuffered tx - -fixupTxMask :: txid -> TxMask (Tx txid) -> TxMask (Tx txid) -fixupTxMask txid (TxAvailable tx status) = TxAvailable tx { getTxId = txid } status -fixupTxMask txid (TxBuffered tx) = TxBuffered tx { getTxId = txid } - - -instance Arbitrary tx => Arbitrary (TxMask tx) where - arbitrary = oneof [ TxAvailable - <$> arbitrary - <*> arbitrary - , TxBuffered <$> arbitrary - ] + liveKeys = IntMap.keysSet sharedTxTable `IntSet.union` retainedKeysSet sharedRetainedTxs + activeEntries = IntMap.toList sharedTxTable + knownPeers = Map.keysSet sharedPeers + + keysRoundTripForward = + all (\(txid, txKey) -> IntMap.lookup (unTxKey txKey) sharedKeyToTxId == Just txid) + (Map.toList sharedTxIdToKey) + + keysRoundTripBackward = + all (\(k, txid) -> Map.lookup txid sharedTxIdToKey == Just (TxKey k)) + (IntMap.toList sharedKeyToTxId) + + activeEntryLive TxEntry { txLease, txAdvertisers, txAttempts } = + leaseLive txLease + || not (Map.null txAdvertisers) + || not (Map.null txAttempts) + + leaseLive TxClaimable = False + leaseLive TxLeased {} = True + + checkTxEntry (k, txEntry@TxEntry { txLease, txAdvertisers, txAttempts }) = + counterexample ("bad active tx entry " ++ show k ++ ": " ++ show txEntry) $ + conjoin + [ property (Map.keysSet txAdvertisers `Set.isSubsetOf` knownPeers) + , property (Map.keysSet txAttempts `Set.isSubsetOf` Map.keysSet txAdvertisers) + , case txLease of + TxClaimable -> + property True + TxLeased owner _ -> + property (Map.member owner sharedPeers && Map.member owner 
txAdvertisers) + ] - -- TODO: implement shrinker; this can be done by writing an inverse of - -- `mkArbPeerTxState` and shrinking the unacknowledged txs & mask map. +newtype ArbTxDecisionPolicy = ArbTxDecisionPolicy TxDecisionPolicy + deriving Show +newtype ArbSharedTxState = ArbSharedTxState (SharedTxState PeerAddr TxId) + deriving Show --- | Smart constructor for `ArbPeerTxState`. --- -mkArbPeerTxState :: Ord txid - => Fun txid Bool - -> Int -- ^ txids in-flight - -> [txid] - -> Map txid (TxMask (Tx txid)) - -> ArbPeerTxState txid (Tx txid) -mkArbPeerTxState mempoolHasTxFun txIdsInflight unacked txMaskMap = - ArbPeerTxState - PeerTxState { unacknowledgedTxIds = StrictSeq.fromList unacked, - availableTxIds, - requestedTxIdsInflight, - requestedTxsInflight, - requestedTxsInflightSize, - unknownTxs, - score = 0, - scoreTs = Time 0, - downloadedTxs = Map.empty, - toMempoolTxs = Map.empty } - (Set.fromList $ Map.elems inflightMap) - bufferedMap - where - mempoolHasTx = apply mempoolHasTxFun - availableTxIds = Map.fromList - [ (txid, getTxAdvSize tx) | (txid, TxAvailable tx _) <- Map.assocs txMaskMap - , not (mempoolHasTx txid) - ] - unknownTxs = Set.fromList - [ txid | (txid, TxAvailable _ Unknown) <- Map.assocs txMaskMap - , not (mempoolHasTx txid) - ] - - requestedTxIdsInflight = fromIntegral txIdsInflight - requestedTxsInflightSize = foldMap getTxAdvSize inflightMap - requestedTxsInflight = Map.keysSet inflightMap - - -- exclude `txid`s which are already in the mempool, we never request such - -- `txid`s - -- - -- TODO: this should be lifted, we might have the same txid in-flight from - -- multiple peers, one will win the race and land in the mempool first - inflightMap = Map.fromList - [ (txid, tx) - | (txid, TxAvailable tx Inflight) <- Map.assocs txMaskMap - , not (mempoolHasTx txid) - ] - - bufferedMap = Map.fromList - [ (txid, Nothing) - | txid <- Map.keys txMaskMap - , mempoolHasTx txid - ] - `Map.union` - Map.fromList - [ (txid, mtx) - | (txid, TxBuffered tx) 
<- Map.assocs txMaskMap - , let !mtx = if mempoolHasTx txid - then Nothing - else Just $! tx { getTxId = txid } - ] - - -genArbPeerTxState - :: forall txid. - ( Arbitrary txid - , Ord txid - ) - => Fun txid Bool - -> Int -- ^ max txids inflight - -> Gen (ArbPeerTxState txid (Tx txid)) -genArbPeerTxState mempoolHasTxFun maxTxIdsInflight = do - -- unacknowledged sequence - unacked <- arbitrary - -- generate `Map txid (TxMask tx)` - txIdsInflight <- choose (0, maxTxIdsInflight) - txMap <- Map.fromList - <$> traverse (\txid -> (\a -> (txid, fixupTxMask txid a)) <$> arbitrary) - (nub unacked) - return $ mkArbPeerTxState mempoolHasTxFun txIdsInflight unacked txMap - - -genSharedTxState - :: forall txid. - ( Arbitrary txid - , Ord txid - , Function txid - , CoArbitrary txid - ) - => Int -- ^ max txids inflight - -> Gen ( Fun txid Bool - , (PeerAddr, PeerTxState txid (Tx txid)) - , SharedTxState PeerAddr txid (Tx txid) - , Map PeerAddr (ArbPeerTxState txid (Tx txid)) - ) -genSharedTxState maxTxIdsInflight = do - _mempoolHasTxFun@(Fun (_, _, x) _) <- arbitrary :: Gen (Fun Bool Bool) - let mempoolHasTxFun = Fun (function (const False), False, x) (const False) - pss <- listOf1 (genArbPeerTxState mempoolHasTxFun maxTxIdsInflight) - seed <- arbitrary - - let pss' :: [(PeerAddr, ArbPeerTxState txid (Tx txid))] - pss' = [0..] 
`zip` pss - - peer <- choose (0, length pss - 1) - - let st :: SharedTxState PeerAddr txid (Tx txid) - st = fixupSharedTxState - (apply mempoolHasTxFun) - SharedTxState { - peerTxStates = Map.fromList - [ (peeraddr, arbPeerTxState) - | (peeraddr, ArbPeerTxState { arbPeerTxState }) - <- pss' - ], - inflightTxs = Foldable.foldl' (Map.unionWith (+)) Map.empty - [ Map.fromSet (const 1) (Set.map getTxId arbInflightSet) - | ArbPeerTxState { arbInflightSet } - <- pss - ], - bufferedTxs = fold - [ arbBufferedMap - | ArbPeerTxState { arbBufferedMap } - <- pss - ], - referenceCounts = Map.empty, - timedTxs = Map.empty, - inSubmissionToMempoolTxs - = Map.empty, - peerRng = mkStdGen seed - } - - return ( mempoolHasTxFun - , (peer, peerTxStates st Map.! peer) - , st - , Map.fromList pss' - ) +newtype ArbSharedPeerState = ArbSharedPeerState SharedPeerState + deriving Show +newtype ArbPeerTxLocalState = ArbPeerTxLocalState (PeerTxLocalState (Tx TxId)) + deriving Show --- | Make sure `SharedTxState` is well formed. 
--- -fixupSharedTxState - :: Ord txid - => (txid -> Bool) -- ^ mempoolHasTx - -> SharedTxState peeraddr txid tx - -> SharedTxState peeraddr txid tx -fixupSharedTxState _mempoolHasTx st@SharedTxState { peerTxStates } = - st { peerTxStates = peerTxStates', - inflightTxs = inflightTxs', - bufferedTxs = bufferedTxs', - referenceCounts = referenceCounts' - } - where - peerTxStates' = - Map.map (\ps@PeerTxState { availableTxIds, - requestedTxsInflight } -> - - let -- requested txs must not be buffered - requestedTxsInflight' = requestedTxsInflight - Set.\\ Map.keysSet bufferedTxs' - requestedTxsInflightSize' = fold $ availableTxIds - `Map.restrictKeys` - requestedTxsInflight' - - in ps { requestedTxsInflight = requestedTxsInflight', - requestedTxsInflightSize = requestedTxsInflightSize' } - ) - peerTxStates - - inflightTxs' = foldr (\PeerTxState { requestedTxsInflight } m -> - Map.unionWith (+) - (Map.fromSet (const 1) requestedTxsInflight) - m - ) - Map.empty - peerTxStates' - - bufferedTxs' = - bufferedTxs st - `Map.restrictKeys` - foldr (\PeerTxState {unacknowledgedTxIds = unacked } r -> - r <> Set.fromList (toList unacked)) - Set.empty (Map.elems peerTxStates) - - - referenceCounts' = - Foldable.foldl' - (\m PeerTxState { unacknowledgedTxIds } -> - Foldable.foldl' - (flip $ - Map.alter (\case - Nothing -> Just $! 1 - Just cnt -> Just $! 
succ cnt) - ) - m - unacknowledgedTxIds - ) - Map.empty - (Map.elems peerTxStates) - - -shrinkSharedTxState :: ( Arbitrary txid - , Ord txid - , Function txid - , Ord peeraddr - ) - => (txid -> Bool) - -> SharedTxState peeraddr txid (Tx txid) - -> [SharedTxState peeraddr txid (Tx txid)] -shrinkSharedTxState mempoolHasTx st@SharedTxState { peerTxStates, - inflightTxs, - bufferedTxs } = - [ st' - | peerTxStates' <- Map.fromList <$> shrinkList (\_ -> []) (Map.toList peerTxStates) - , not (Map.null peerTxStates') - , let st' = fixupSharedTxState mempoolHasTx st { peerTxStates = peerTxStates' } - , st' /= st - ] - ++ - [ fixupSharedTxState mempoolHasTx st { inflightTxs = inflightTxs' } - | inflightTxs' <- Map.fromList <$> shrinkList (\_ -> []) (Map.toList inflightTxs) - ] - ++ - [ st - | bufferedTxs' <- Map.fromList - <$> shrinkList (\_ -> []) (Map.assocs bufferedTxs) - , let minBuffered = - foldMap - (\PeerTxState { - unacknowledgedTxIds, - availableTxIds, - unknownTxs - } - -> - Set.fromList (toList unacknowledgedTxIds) - Set.\\ Map.keysSet availableTxIds - Set.\\ unknownTxs - ) - peerTxStates - bufferedTxs'' = bufferedTxs' - `Map.union` - (bufferedTxs `Map.restrictKeys` minBuffered) - st' = fixupSharedTxState mempoolHasTx st { bufferedTxs = bufferedTxs'' } - , st' /= st - ] +instance Arbitrary ArbTxDecisionPolicy where + arbitrary = + ArbTxDecisionPolicy <$> ( + TxDecisionPolicy . getSmall . getPositive + <$> arbitrary + <*> (getSmall . getPositive <$> arbitrary) + <*> (SizeInBytes . getPositive <$> arbitrary) + <*> choose (1, 10) + <*> (getSmall . 
getPositive <$> arbitrary) + <*> (realToFrac <$> choose (0 :: Double, 2)) + <*> choose (0, 1) + <*> choose (0, 1800) + <*> (realToFrac <$> choose (0 :: Double, 1))) --- --- Arbitrary `SharaedTxState` instance --- + shrink (ArbTxDecisionPolicy a@TxDecisionPolicy { + maxNumTxIdsToRequest, + txsSizeInflightPerPeer, + txInflightMultiplicity }) = + [ ArbTxDecisionPolicy a { maxNumTxIdsToRequest = NumTxIdsToReq x } + | (Positive (Small x)) <- shrink (Positive (Small (getNumTxIdsToReq maxNumTxIdsToRequest))) + ] + ++ + [ ArbTxDecisionPolicy a { txsSizeInflightPerPeer = SizeInBytes s } + | Positive s <- shrink (Positive (getSizeInBytes txsSizeInflightPerPeer)) + ] + ++ + [ ArbTxDecisionPolicy a { txInflightMultiplicity = x } + | Positive (Small x) <- shrink (Positive (Small txInflightMultiplicity)) + ] -data ArbSharedTxState = - ArbSharedTxState - (Fun TxId Bool) - (SharedTxState PeerAddr TxId (Tx TxId)) - deriving Show +instance Arbitrary ArbSharedPeerState where + arbitrary = ArbSharedPeerState <$> genSharedPeerState + + shrink (ArbSharedPeerState peerState) + | peerState == defaultPeerState = [] + | otherwise = + [ ArbSharedPeerState defaultPeerState + , ArbSharedPeerState peerState + { sharedPeerGeneration = 0 + , sharedPeerRequestedTxBatches = 0 + , sharedPeerRequestedTxsSize = 0 + } + ] + where + defaultPeerState = mkSharedPeerState PeerIdle (emptyPeerScore now) + +instance Arbitrary ArbPeerTxLocalState where + arbitrary = ArbPeerTxLocalState <$> genPeerTxLocalState + + shrink (ArbPeerTxLocalState peerState) + | peerState == emptyPeerTxLocalState = [] + | otherwise = + [ ArbPeerTxLocalState emptyPeerTxLocalState + , ArbPeerTxLocalState peerState + { peerRequestedTxs = IntSet.empty + , peerRequestedTxBatches = StrictSeq.empty + , peerRequestedTxsSize = 0 + , peerRequestedTxIds = 0 + , peerDownloadedTxs = IntMap.empty + } + ] instance Arbitrary ArbSharedTxState where - arbitrary = do - Small maxTxIdsInflight <- arbitrary - (mempoolHasTx, _, sharedTxState, _) <- 
genSharedTxState maxTxIdsInflight - return $ ArbSharedTxState mempoolHasTx sharedTxState - - shrink (ArbSharedTxState mempoolHasTx st) = - [ ArbSharedTxState mempoolHasTx st' - | st' <- shrinkSharedTxState (apply mempoolHasTx) st + arbitrary = ArbSharedTxState <$> genSharedTxState + + shrink (ArbSharedTxState sharedState) + | sharedState == emptySharedTxState = [] + | otherwise = ArbSharedTxState <$> shrinkSharedTxState sharedState + +-- Verifies that handleReceivedTxIds interns new txids, adds leased entries +-- for them, and preserves unrelated shared-state entries. +prop_handleReceivedTxIds_newEntries + :: Positive Int + -> ArbSharedTxState + -> NonEmptyList (TxId, Positive Int) + -> Positive Int + -> Property +prop_handleReceivedTxIds_newEntries (Positive peeraddr) (ArbSharedTxState sharedState0) (NonEmpty txids0) (Positive extraRequested) = + conjoin + [ peerRequestedTxIds peerState' === fromIntegral extraRequested + , StrictSeq.length (peerUnacknowledgedTxIds peerState') === length txidsAndSizes + , toList (peerUnacknowledgedTxIds peerState') === fmap (\(txid, _) -> lookupKeyOrFail txid sharedState') txidsAndSizes + , IntMap.size (peerAvailableTxIds peerState') === length txidsAndSizes + , sharedPeers sharedState' === sharedPeers sharedState0 + , IntMap.restrictKeys (sharedTxTable sharedState') oldKeys === sharedTxTable sharedState0 + , retainedRestrictKeys (sharedRetainedTxs sharedState') oldKeys === sharedRetainedTxs sharedState0 + , IntMap.restrictKeys (sharedKeyToTxId sharedState') oldKeys === sharedKeyToTxId sharedState0 + , sharedGeneration sharedState' === sharedGeneration sharedState0 + 1 + , sharedNextTxKey sharedState' === sharedNextTxKey sharedState0 + length txidsAndSizes + , conjoin (fmap checkExistingTxId (Map.toList (sharedTxIdToKey sharedState0))) + , conjoin (fmap checkEntry txidsAndSizes) ] - - --- | Verify that generated `SharedTxState` has no thunks if it's evaluated to --- WHNF. 
--- -prop_SharedTxState_nothunks :: ArbSharedTxState -> Property -prop_SharedTxState_nothunks (ArbSharedTxState _ !st) = - case unsafeNoThunks st of - Nothing -> property True - Just ctx -> counterexample (show ctx) False - - -prop_SharedTxState_generator - :: ArbSharedTxState + where + txidsAndSizes = + freshBatchAgainstSharedState sharedState0 $ + dedupeBatch [ (abs txid + 1, mkSize txSize) | (txid, txSize) <- txids0 ] + oldKeys = IntMap.keysSet (sharedKeyToTxId sharedState0) + requestedToReply = fromIntegral (length txidsAndSizes) + peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = requestedToReply + fromIntegral extraRequested } + (peerState', sharedState') = + handleReceivedTxIds (const False) now defaultTxDecisionPolicy peeraddr requestedToReply txidsAndSizes peerState0 sharedState0 + expectedLeaseUntil = addTime (interTxSpace defaultTxDecisionPolicy) now + + checkExistingTxId (txid, txKey) = + Map.lookup txid (sharedTxIdToKey sharedState') === Just txKey + + checkEntry (txid, txSize) = + case IntMap.lookup (unTxKey (lookupKeyOrFail txid sharedState')) (sharedTxTable sharedState') of + Nothing -> counterexample ("missing tx entry for " ++ show txid) False + Just TxEntry { txLease, txAdvertisers, txAttempts } -> + conjoin + [ txLease === TxLeased peeraddr expectedLeaseUntil + , txAdvertisers === Map.singleton peeraddr (mkAdvertiser AckWhenBuffered txSize) + , txAttempts === Map.empty + ] + +-- Verifies that handleReceivedTxIds retains txids already known to the +-- mempool instead of leaving active tx entries behind. 
+prop_handleReceivedTxIds_knownToMempool + :: Positive Int + -> TxId + -> Positive Int -> Property -prop_SharedTxState_generator (ArbSharedTxState _ st) = sharedTxStateInvariant StrongInvariant st - - -prop_SharedTxState_shrinker - :: Fixed ArbSharedTxState +prop_handleReceivedTxIds_knownToMempool (Positive peeraddr) txid0 txSize0 = + conjoin + [ peerAvailableTxIds peerState' === IntMap.empty + , toList (peerUnacknowledgedTxIds peerState') === [key] + , IntMap.lookup (unTxKey key) (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) + , retainedLookup (unTxKey key) (sharedRetainedTxs sharedState') === Just expectedRetainUntil + , Map.lookup txid (sharedTxIdToKey sharedState') === Just key + , IntMap.lookup (unTxKey key) (sharedKeyToTxId sharedState') === Just txid + , sharedGeneration sharedState' === 1 + ] + where + txid = abs txid0 + 1 + txSize = mkSize txSize0 + requestedToReply = 1 + peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = requestedToReply } + (peerState', sharedState') = + handleReceivedTxIds (== txid) now defaultTxDecisionPolicy peeraddr requestedToReply [(txid, txSize)] peerState0 emptySharedTxState + key = lookupKeyOrFail txid sharedState' + expectedRetainUntil = addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now + +-- Verifies that handleReceivedTxs buffers received bodies and removes omitted +-- requested txs from peer and shared state. +prop_handleReceivedTxs_buffersAndDropsOmitted + :: Positive Int + -> TxId + -> TxId + -> Positive Int + -> Positive Int -> Property -prop_SharedTxState_shrinker = - property - . foldMap (\(ArbSharedTxState _ st) -> Every $ sharedTxStateInvariant StrongInvariant st) - . shrink - . 
getFixed - - --- --- `receivedTxIdsImpl` properties --- - - -data ArbReceivedTxIds = - ArbReceivedTxIds (Fun TxId Bool) -- ^ mempoolHasTx - [Tx TxId] -- ^ some txs to acknowledge - PeerAddr -- ^ peer address - (PeerTxState TxId (Tx TxId)) - -- ^ peer state - (SharedTxState PeerAddr TxId (Tx TxId)) - -- ^ initial state - deriving Show - -instance Arbitrary ArbReceivedTxIds where - arbitrary = do - Small maxTxIdsInflight <- arbitrary - (mempoolHasTxFun, (peeraddr, ps), st, psMap) <- genSharedTxState maxTxIdsInflight - txsToAck <- sublistOf (Set.toList $ arbInflightSet (psMap Map.! peeraddr)) - pure $ ArbReceivedTxIds - mempoolHasTxFun - txsToAck - peeraddr - ps - st - - shrink (ArbReceivedTxIds mempoolHasTxFun txs peeraddr ps st) = - [ ArbReceivedTxIds mempoolHasTxFun txs' peeraddr ps st - | txs' <- shrink txs +prop_handleReceivedTxs_buffersAndDropsOmitted (Positive peeraddr) txidA0 txidB0 txSizeA0 txSizeB0 = + txidA /= txidB ==> + conjoin + [ omittedCount === 1 + , lateCount === 0 + , peerRequestedTxs peerState' === IntSet.empty + , peerRequestedTxsSize peerState' === 0 + , peerDownloadedTxs peerState' === IntMap.singleton kA txA + , peerAvailableTxIds peerState' === IntMap.empty + , fmap (Map.lookup peeraddr . 
txAttempts) (IntMap.lookup kA (sharedTxTable sharedState')) === Just (Just TxBuffered) + , IntMap.lookup kB (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) + , retainedLookup kB (sharedRetainedTxs sharedState') === Nothing + , Map.lookup txidB (sharedTxIdToKey sharedState') === Nothing + , IntMap.lookup kB (sharedKeyToTxId sharedState') === Nothing + , sharedGeneration sharedState' === 1 ] - ++ - [ ArbReceivedTxIds - mempoolHasTxFun' txs peeraddr ps - (fixupSharedTxState (apply mempoolHasTxFun') st) - | mempoolHasTxFun' <- shrink mempoolHasTxFun + where + txidA = abs txidA0 + 1 + txidB = abs txidB0 + 2 + txSizeA = mkSize txSizeA0 + txSizeB = mkSize txSizeB0 + txA = mkTx txidA txSizeA + sharedState0 = + let st = mkSharedState [txidA, txidB] + keyA' = lookupKeyOrFail txidA st + keyB' = lookupKeyOrFail txidB st in + st { + sharedTxTable = IntMap.fromList + [ (unTxKey keyA', mkTxEntry peeraddr txSizeA (Just TxDownloading)) + , (unTxKey keyB', mkTxEntry peeraddr txSizeB (Just TxDownloading)) + ] + } + keyA = lookupKeyOrFail txidA sharedState0 + keyB = lookupKeyOrFail txidB sharedState0 + kA = unTxKey keyA + kB = unTxKey keyB + peerState0 = emptyPeerTxLocalState + { peerAvailableTxIds = IntMap.fromList [(kA, txSizeA), (kB, txSizeB)] + , peerRequestedTxs = IntSet.fromList [kA, kB] + , peerRequestedTxBatches = StrictSeq.singleton (mkRequestedTxBatch [keyA, keyB] (txSizeA + txSizeB)) + , peerRequestedTxsSize = txSizeA + txSizeB + } + (omittedCount, lateCount, peerState', sharedState') = handleReceivedTxs (const False) now defaultTxDecisionPolicy peeraddr [(txidA, txA)] peerState0 sharedState0 + +-- Verifies that handleReceivedTxs drops late bodies when the tx is already +-- retained or already present in the mempool. 
+prop_handleReceivedTxs_dropsLateBodies + :: Positive Int + -> TxId + -> Positive Int + -> Property +prop_handleReceivedTxs_dropsLateBodies (Positive peeraddr) txid0 txSize0 = + conjoin + [ omittedRetained === 0 + , lateRetained === 1 + , peerStateRetained' === peerState0 + { peerAvailableTxIds = IntMap.empty + , peerRequestedTxs = IntSet.empty + , peerRequestedTxBatches = StrictSeq.empty + , peerRequestedTxsSize = 0 + } + , sharedTxTable sharedStateRetained' === IntMap.empty + , retainedLookup k (sharedRetainedTxs sharedStateRetained') === Just retainedUntil + , omittedMempool === 0 + , lateMempool === 1 + , peerStateMempool' === peerState0 + { peerAvailableTxIds = IntMap.empty + , peerRequestedTxs = IntSet.empty + , peerRequestedTxBatches = StrictSeq.empty + , peerRequestedTxsSize = 0 + } + , sharedTxTable sharedStateMempool' === IntMap.empty + , retainedLookup k (sharedRetainedTxs sharedStateMempool') === Just retainedUntil + ] + where + txid = abs txid0 + 1 + txSize = mkSize txSize0 + tx = mkTx txid txSize + sharedStateBase = + let st = mkSharedState [txid] + key' = lookupKeyOrFail txid st in + st { + sharedTxTable = IntMap.singleton (unTxKey key') (mkTxEntry peeraddr txSize Nothing) + } + key = lookupKeyOrFail txid sharedStateBase + k = unTxKey key + retainedUntil = addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now + peerState0 :: PeerTxLocalState (Tx TxId) + peerState0 = emptyPeerTxLocalState + { peerAvailableTxIds = IntMap.singleton k txSize + , peerRequestedTxs = IntSet.singleton k + , peerRequestedTxBatches = StrictSeq.singleton (mkRequestedTxBatch [key] txSize) + , peerRequestedTxsSize = txSize + } + sharedStateRetained0 = sharedStateBase { + sharedTxTable = IntMap.empty, + sharedRetainedTxs = retainedSingleton k retainedUntil + } + (omittedRetained, lateRetained, peerStateRetained', sharedStateRetained') = + handleReceivedTxs (const False) now defaultTxDecisionPolicy peeraddr [(txid, tx)] peerState0 sharedStateRetained0 + (omittedMempool, 
lateMempool, peerStateMempool', sharedStateMempool') = + handleReceivedTxs (== txid) now defaultTxDecisionPolicy peeraddr [(txid, tx)] peerState0 sharedStateBase + +-- Verifies that omitting a requested body still counts as a penalty even if +-- the tx has already been fully pruned from shared state by the time the +-- reply is processed. +prop_handleReceivedTxs_penalizesOmittedAfterPrune + :: Positive Int + -> TxId + -> Positive Int + -> Property +prop_handleReceivedTxs_penalizesOmittedAfterPrune (Positive peeraddr) txid0 txSize0 = + conjoin + [ omittedCount === 1 + , lateCount === 0 + , peerAvailableTxIds peerState' === IntMap.empty + , peerRequestedTxs peerState' === IntSet.empty + , peerRequestedTxBatches peerState' === StrictSeq.empty + , peerRequestedTxsSize peerState' === 0 + , peerDownloadedTxs peerState' === (IntMap.empty :: IntMap.IntMap (Tx TxId)) + , sharedTxTable sharedState' === IntMap.empty + , retainedLookup k (sharedRetainedTxs sharedState') === Nothing + , Map.lookup txid (sharedTxIdToKey sharedState') === Nothing + , IntMap.lookup k (sharedKeyToTxId sharedState') === Nothing + , sharedGeneration sharedState' === 1 + ] + where + txid = abs txid0 + 1 + txSize = mkSize txSize0 + sharedStateBase = + let st = mkSharedState [txid] + key' = lookupKeyOrFail txid st in + st { + sharedTxTable = IntMap.singleton (unTxKey key') (mkTxEntry peeraddr txSize (Just TxDownloading)) + } + key = lookupKeyOrFail txid sharedStateBase + k = unTxKey key + peerState0 = emptyPeerTxLocalState + { peerAvailableTxIds = IntMap.singleton k txSize + , peerRequestedTxs = IntSet.singleton k + , peerRequestedTxBatches = StrictSeq.singleton (mkRequestedTxBatch [key] txSize) + , peerRequestedTxsSize = txSize + } + sharedStatePruned = sharedStateBase + { sharedTxTable = IntMap.empty + , sharedRetainedTxs = retainedEmpty + , sharedTxIdToKey = Map.empty + , sharedKeyToTxId = IntMap.empty + } + (omittedCount, lateCount, peerState', sharedState') = + handleReceivedTxs (const False) now 
defaultTxDecisionPolicy peeraddr [] peerState0 sharedStatePruned + +-- Verifies that handleSubmittedTxs retains accepted txs and removes rejected +-- txs from the active table and tx-key maps. +prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected + :: Positive Int + -> TxId + -> TxId + -> Positive Int + -> Positive Int + -> Property +prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected (Positive peeraddr) txidA0 txidB0 txSizeA0 txSizeB0 = + txidA /= txidB ==> + conjoin + [ peerDownloadedTxs peerState' === IntMap.empty + , IntMap.lookup kA (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) + , retainedLookup kA (sharedRetainedTxs sharedState') === Just expectedRetainUntil + , Map.lookup txidA (sharedTxIdToKey sharedState') === Just keyA + , IntMap.lookup kA (sharedKeyToTxId sharedState') === Just txidA + , IntMap.lookup kB (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) + , retainedLookup kB (sharedRetainedTxs sharedState') === Nothing + , Map.lookup txidB (sharedTxIdToKey sharedState') === Nothing + , IntMap.lookup kB (sharedKeyToTxId sharedState') === Nothing + , sharedGeneration sharedState' === 1 ] - - -prop_receivedTxIds_generator - :: ArbReceivedTxIds + where + txidA = abs txidA0 + 1 + txidB = abs txidB0 + 2 + txSizeA = mkSize txSizeA0 + txSizeB = mkSize txSizeB0 + txA = mkTx txidA txSizeA + txB = mkTx txidB txSizeB + sharedState0 = + let st = mkSharedState [txidA, txidB] + keyA' = lookupKeyOrFail txidA st + keyB' = lookupKeyOrFail txidB st in + st { + sharedTxTable = IntMap.fromList + [ (unTxKey keyA', mkTxEntry peeraddr txSizeA (Just TxBuffered)) + , (unTxKey keyB', mkTxEntry peeraddr txSizeB (Just TxBuffered)) + ] + } + keyA = lookupKeyOrFail txidA sharedState0 + keyB = lookupKeyOrFail txidB sharedState0 + kA = unTxKey keyA + kB = unTxKey keyB + peerState0 = emptyPeerTxLocalState { peerDownloadedTxs = IntMap.fromList [(kA, txA), (kB, txB)] } + expectedRetainUntil = addTime (bufferedTxsMinLifetime 
defaultTxDecisionPolicy) now + (peerState', sharedState') = handleSubmittedTxs now defaultTxDecisionPolicy peeraddr [keyA] [keyB] peerState0 sharedState0 + +-- Verifies that nextPeerAction submits buffered txs owned by the peer before +-- taking any other action. +prop_nextPeerAction_prioritisesSubmit + :: Positive Int + -> TxId + -> Positive Int -> Property -prop_receivedTxIds_generator (ArbReceivedTxIds _ someTxsToAck _peeraddr _ps st) = - label ("numToAck " ++ labelInt 100 10 (length someTxsToAck)) - . counterexample (show st) - $ sharedTxStateInvariant StrongInvariant st - - --- | This property verifies that `acknowledgeTxIds` acknowledges a prefix of --- unacknowledged txs, and that the `numTxIdsToAck` as well as `RefCoundDiff` --- are correct. --- --- It doesn't validate the returned `PeerTxState` holds it's properties as this --- needs to be done in the context of updated `SharedTxState`. This is verified --- by `prop_receivedTxIdsImpl`, `prop_collectTxsImpl` and --- `prop_makeDecisions_acknowledged`. --- -prop_acknowledgeTxIds :: ArbDecisionContextWithReceivedTxIds - -> Property -prop_acknowledgeTxIds (ArbDecisionContextWithReceivedTxIds policy st ps _ _ _) = - case TXS.acknowledgeTxIds policy st ps of - (numTxIdsToAck, txIdsToRequest, TXS.TxsToMempool txIdsTxs, TXS.RefCountDiff { TXS.txIdsToAck }, ps') | txIdsToRequest > 0 -> - counterexample "number of tx ids to ack must agree with RefCountDiff" - ( fromIntegral numTxIdsToAck - === - getSum (foldMap Sum txIdsToAck) - ) - - .&&. counterexample "acknowledged txs must form a prefix" - (let unacked = toList (unacknowledgedTxIds ps) - unacked' = toList (unacknowledgedTxIds ps') - in case unacked `stripSuffix` unacked' of - Nothing -> counterexample "acknowledged txs are not a prefix" False - Just txIdsToAck' -> - txIdsToAck - === - Map.fromListWith (+) ((,1) <$> txIdsToAck')) - - .&&. 
counterexample "acknowledged txs" - (let acked, txsToMempool :: Set TxId - acked = Set.fromList $ take (fromIntegral numTxIdsToAck) (toList $ unacknowledgedTxIds ps) - txsToMempool = Set.fromList $ map fst txIdsTxs - in counterexample ("numTxIdsToAck = " ++ show numTxIdsToAck) - (Set.isSubsetOf txsToMempool acked)) - .&&. counterexample "to mempool" - let dlTxs, txsToMempool, unacked' :: Set TxId - dlTxs = Map.keysSet $ downloadedTxs ps - txsToMempool = Set.fromList $ map fst txIdsTxs - unacked' = Set.fromList $ toList (unacknowledgedTxIds ps') - in counterexample ("txsToMempool " ++ show txsToMempool ++ - " not a subset of dlTxs " ++ show dlTxs) - (Set.isSubsetOf txsToMempool dlTxs) - .&&. counterexample ("txsToMempool" ++ show txsToMempool ++ - " not disjoint following decision from unacked " ++ show unacked') - (Set.null (Set.intersection unacked' txsToMempool)) - - _otherwise -> property True +prop_nextPeerAction_prioritisesSubmit (Positive peeraddr) txid0 txSize0 = + case peerAction of + PeerSubmitTxs [txKey] -> + conjoin + [ txKey === key + , peerState' === peerState0 + , sharedState' === sharedState0 + ] + _ -> counterexample ("unexpected peer action: " ++ show peerAction) False where - stripSuffix :: Eq a => [a] -> [a] -> Maybe [a] - stripSuffix as suffix = - reverse <$> reverse suffix `stripPrefix` reverse as - - --- | Verify 'inboundStateInvariant' when acknowledging a sequence of txs. 
--- -prop_receivedTxIdsImpl - :: ArbReceivedTxIds + txid = abs txid0 + 1 + txSize = mkSize txSize0 + tx = mkTx txid txSize + key = TxKey 0 + k = unTxKey key + sharedState0 = emptySharedTxState + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + , sharedTxTable = IntMap.singleton k TxEntry + { txLease = TxLeased peeraddr (addTime 10 now) + , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenBuffered txSize) + , txTieBreakSalt = txid + , txAttempts = Map.singleton peeraddr TxBuffered + } + , sharedTxIdToKey = Map.singleton txid key + , sharedKeyToTxId = IntMap.singleton k txid + , sharedNextTxKey = 1 + } + peerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton key + , peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy + , peerDownloadedTxs = IntMap.singleton k tx + } + (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + +-- Verifies that nextPeerAction leases a claimable tx to the best idle +-- advertiser and requests its body. +prop_nextPeerAction_claimsClaimableTx + :: Positive Int + -> Positive Int + -> Positive Int + -> TxId + -> Positive Int -> Property -prop_receivedTxIdsImpl (ArbReceivedTxIds mempoolHasTxFun txs peeraddr ps st) = - -- InboundState invariant - counterexample - ( "Unacknowledged in mempool: " ++ - show (apply mempoolHasTxFun <$> toList (unacknowledgedTxIds ps)) ++ "\n" - ++ "InboundState invariant violation:\n" ++ - show st' - ) - (sharedTxStateInvariant StrongInvariant st') - - -- unacknowledged txs are well formed - .&&. counterexample "unacknowledged txids are not well formed" - ( let unacked = toList $ unacknowledgedTxIds ps <> txidSeq - unacked' = toList $ unacknowledgedTxIds ps' - in counterexample ("old & received: " ++ show unacked ++ "\n" ++ - "new: " ++ show unacked') $ - unacked' `isSuffixOf` unacked - ) - - .&&. 
-- `receivedTxIdsImpl` doesn't acknowledge any `txids` - counterexample "acknowledged property violation" - ( let unacked = toList $ unacknowledgedTxIds ps - unacked' = toList $ unacknowledgedTxIds ps' - in unacked `isPrefixOf` unacked' - ) +prop_nextPeerAction_claimsClaimableTx (Positive peerA0) (Positive peerB0) (Positive peerC0) txid0 txSize0 = + distinctPeers ==> + case peerAction of + PeerRequestTxs txKeys -> + conjoin + [ txKeys === [key] + , peerRequestedTxs peerState' === IntSet.singleton k + , txLease (lookupEntryOrFail key sharedState') === + TxLeased peerA (addTime (interTxSpace defaultTxDecisionPolicy) now) + ] + _ -> counterexample ("unexpected peer action: " ++ show peerAction) False where - st' = TXS.receivedTxIdsImpl (apply mempoolHasTxFun) - peeraddr 0 txidSeq txidMap st - ps' = peerTxStates st' Map.! peeraddr - - txidSeq = StrictSeq.fromList (getTxId <$> txs) - txidMap = Map.fromList [ (getTxId tx, getTxSize tx) | tx <- txs ] - - --- | Verify that `SharedTxState` returned by `receivedTxIdsImpl` if evaluated --- to WHNF it doesn't contain any thunks. 
--- -prop_receivedTxIdsImpl_nothunks - :: ArbReceivedTxIds + peerA = peerA0 + peerB = peerB0 + 1000 + peerC = peerC0 + 2000 + distinctPeers = peerA /= peerB && peerA /= peerC && peerB /= peerC + txid = abs txid0 + 1 + txSize = mkSize txSize0 + key = TxKey 0 + k = unTxKey key + sharedState0 = emptySharedTxState + { sharedPeers = Map.fromList + [ (peerA, mkSharedPeerState PeerIdle (PeerScore 1 now)) + , (peerB, mkSharedPeerState PeerIdle (PeerScore 10 now)) + , (peerC, mkSharedPeerState PeerWaitingTxs (PeerScore 0 now)) + ] + , sharedTxIdToKey = Map.singleton txid key + , sharedKeyToTxId = IntMap.singleton k txid + , sharedNextTxKey = 1 + , sharedTxTable = IntMap.singleton k TxEntry + { txLease = TxClaimable + , txAdvertisers = mkAdvertisers txSize + [ (peerA, AckWhenResolved) + , (peerB, AckWhenResolved) + , (peerC, AckWhenResolved) + ] + , txTieBreakSalt = txid + , txAttempts = Map.empty + } + } + peerState0 = emptyPeerTxLocalState { peerAvailableTxIds = IntMap.singleton k txSize } + (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peerA peerState0 sharedState0 + +-- Verifies that nextPeerAction can steal an expired lease for the best idle +-- advertiser and request that tx. 
+prop_nextPeerAction_claimsExpiredLease + :: Positive Int + -> Positive Int + -> Positive Int + -> TxId + -> Positive Int -> Property -prop_receivedTxIdsImpl_nothunks (ArbReceivedTxIds mempoolHasTxFun txs peeraddr _ st) = - case TXS.receivedTxIdsImpl (apply mempoolHasTxFun) - peeraddr 0 txidSeq txidMap st of - !st' -> case unsafeNoThunks st' of - Nothing -> property True - Just ctx -> counterexample (show ctx) False +prop_nextPeerAction_claimsExpiredLease (Positive oldOwner0) (Positive peerA0) (Positive peerB0) txid0 txSize0 = + distinctPeers ==> + case peerAction of + PeerRequestTxs txKeys -> + conjoin + [ txKeys === [key] + , peerRequestedTxs peerState' === IntSet.singleton k + , txLease (lookupEntryOrFail key sharedState') === + TxLeased peerA (addTime (interTxSpace defaultTxDecisionPolicy) now) + ] + _ -> counterexample ("unexpected peer action: " ++ show peerAction) False where - txidSeq = StrictSeq.fromList (getTxId <$> txs) - txidMap = Map.fromList [ (getTxId tx, getTxSize tx) | tx <- txs ] - - --- --- `collectTxs` properties --- - - -data ArbCollectTxs = - ArbCollectTxs (Fun TxId Bool) -- ^ mempoolHasTx - (Map TxId SizeInBytes) -- ^ requested txid's - (Map TxId (Tx TxId)) -- ^ received txs - PeerAddr -- ^ peeraddr - (PeerTxState TxId (Tx TxId)) - (SharedTxState PeerAddr TxId (Tx TxId)) - -- ^ 'InboundState' - deriving Show - - -instance Arbitrary ArbCollectTxs where - arbitrary = do - Small maxTxIdsInflight <- arbitrary - ( mempoolHasTxFun - , (peeraddr, ps@PeerTxState { availableTxIds, - requestedTxIdsInflight, - requestedTxsInflight, - requestedTxsInflightSize }) - , st - , _ - ) - <- genSharedTxState maxTxIdsInflight - requestedTxIds <- take (fromIntegral requestedTxIdsInflight) - <$> sublistOf (toList requestedTxsInflight) - - -- Limit the requested `txid`s to satisfy `requestedTxsInflightSize`. - let requestedTxIds' = fmap fst - $ takeWhile (\(_,s) -> s <= requestedTxsInflightSize) - $ zip requestedTxIds - (scanl1 (<>) [availableTxIds Map.! 
txid | txid <- requestedTxIds ]) - - receivedTx <- sublistOf requestedTxIds' - >>= traverse (\txid -> do - -- real size, which might be different from - -- the advertised size - size <- frequency [ (9, pure (availableTxIds Map.! txid)) - , (1, chooseEnum (0, maxTxSize)) - ] - - valid <- frequency [(4, pure True), (1, pure False)] - pure $ Tx { getTxId = txid, - getTxSize = size, - -- `availableTxIds` contains advertised sizes - getTxAdvSize = availableTxIds Map.! txid, - getTxValid = valid, - getTxParent = Nothing }) - - pure $ assert (foldMap getTxAdvSize receivedTx <= requestedTxsInflightSize) - $ ArbCollectTxs mempoolHasTxFun - (Map.fromList [ (txid, advSize) - | txid <- requestedTxIds' - , let advSize = availableTxIds Map.! txid - ]) - (Map.fromList [ (getTxId tx, tx) | tx <- receivedTx ]) - peeraddr - ps - st - - shrink (ArbCollectTxs mempoolHasTx requestedTxs receivedTxs peeraddr ps st) = - [ ArbCollectTxs mempoolHasTx - (Map.restrictKeys requestedTxs requestedTxs') - (receivedTxs `Map.restrictKeys` requestedTxs') - peeraddr ps st - | requestedTxs' <- Set.fromList <$> shrinkList (\_ -> []) (Map.keys requestedTxs) - ] - ++ - [ ArbCollectTxs mempoolHasTx - requestedTxs - (receivedTxs `Map.restrictKeys` receivedTxIds) - peeraddr ps st - | receivedTxIds <- Set.fromList <$> shrinkList (\_ -> []) (Map.keys receivedTxs) + oldOwner = oldOwner0 + peerA = peerA0 + 1000 + peerB = peerB0 + 2000 + distinctPeers = oldOwner /= peerA && oldOwner /= peerB && peerA /= peerB + txid = abs txid0 + 1 + txSize = mkSize txSize0 + key = TxKey 0 + k = unTxKey key + sharedState0 = emptySharedTxState + { sharedPeers = Map.fromList + [ (oldOwner, mkSharedPeerState PeerWaitingTxs (PeerScore 0 now)) + , (peerA, mkSharedPeerState PeerIdle (PeerScore 1 now)) + , (peerB, mkSharedPeerState PeerIdle (PeerScore 10 now)) + ] + , sharedTxIdToKey = Map.singleton txid key + , sharedKeyToTxId = IntMap.singleton k txid + , sharedNextTxKey = 1 + , sharedTxTable = IntMap.singleton k TxEntry + { txLease 
= TxLeased oldOwner (Time 0) + , txAdvertisers = mkAdvertisers txSize + [ (oldOwner, AckWhenResolved) + , (peerA, AckWhenResolved) + , (peerB, AckWhenResolved) + ] + , txTieBreakSalt = txid + , txAttempts = Map.empty + } + } + peerState0 = emptyPeerTxLocalState { peerAvailableTxIds = IntMap.singleton k txSize } + (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peerA peerState0 sharedState0 + +-- Verifies that nextPeerAction still requests an oversized first tx when it +-- is the only available choice within the soft-budget policy. +prop_nextPeerAction_requestsOversizedFirstTx + :: Positive Int + -> TxId + -> Positive Int + -> Property +prop_nextPeerAction_requestsOversizedFirstTx (Positive peeraddr) txid0 (Positive txSize0) = + case peerAction of + PeerRequestTxs [txKey] -> + conjoin + [ txKey === key + , peerRequestedTxs peerState' === IntSet.singleton k + , peerRequestedTxsSize peerState' === txSize + , txLease (lookupEntryOrFail key sharedState') === + TxLeased peeraddr (addTime (interTxSpace policy) now) + ] + _ -> counterexample ("unexpected peer action: " ++ show peerAction) False + where + txid = abs txid0 + 1 + txSize = mkSize (Positive (txSize0 + 1)) + key = TxKey 0 + k = unTxKey key + policy = defaultTxDecisionPolicy + { txsSizeInflightPerPeer = txSize - 1 + , maxOutstandingTxBatchesPerPeer = 1 + } + sharedState0 = emptySharedTxState + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + , sharedTxIdToKey = Map.singleton txid key + , sharedKeyToTxId = IntMap.singleton k txid + , sharedNextTxKey = 1 + , sharedTxTable = IntMap.singleton k (mkTxEntry peeraddr txSize Nothing) + } + peerState0 = emptyPeerTxLocalState + { peerAvailableTxIds = IntMap.singleton k txSize + , peerRequestedTxIds = maxNumTxIdsToRequest policy + } + (peerAction, peerState', sharedState') = nextPeerAction now policy peeraddr peerState0 sharedState0 + +-- Verifies that nextPeerAction skips available txs blocked by 
another +-- peer's lease and requests a later claimable tx instead. +unit_nextPeerAction_skipsBlockedAvailableTxs :: (String -> IO ()) -> Assertion +unit_nextPeerAction_skipsBlockedAvailableTxs step = do + step "Run nextPeerAction with one blocked tx and one later claimable tx" + case peerAction of + PeerRequestTxs [TxKey requested] -> do + step "Assert the later claimable tx is requested and leased" + requested @?= kClaimable + peerRequestedTxs peerState' @?= IntSet.singleton kClaimable + fmap txLease (IntMap.lookup kClaimable (sharedTxTable sharedState')) @?= + Just (TxLeased peeraddr (addTime (interTxSpace policy) testNow)) + other -> + assertFailure ("unexpected action: " ++ show other) + where + testNow = Time 100 + policy = defaultTxDecisionPolicy + peeraddr = 7 :: PeerAddr + otherPeer = 8 :: PeerAddr + kBlocked = 1 + kClaimable = 2 + peerState = emptyPeerTxLocalState + { peerAvailableTxIds = IntMap.fromList [(kBlocked, 10), (kClaimable, 11)] + } + sharedState :: SharedTxState PeerAddr TxId + sharedState = emptySharedTxState + { sharedPeers = Map.fromList + [ (peeraddr, SharedPeerState PeerIdle (emptyPeerScore testNow) 0 0 0) + , (otherPeer, SharedPeerState PeerIdle (emptyPeerScore testNow) 0 0 0) + ] + , sharedTxTable = IntMap.fromList + [ (kBlocked, TxEntry + { txLease = TxLeased otherPeer (addTime 10 testNow) + , txAdvertisers = mkAdvertisers 10 [(peeraddr, AckWhenResolved), (otherPeer, AckWhenResolved)] + , txTieBreakSalt = 0 + , txAttempts = Map.empty + }) + , (kClaimable, TxEntry + { txLease = TxClaimable + , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved 11) + , txTieBreakSalt = 0 + , txAttempts = Map.empty + }) + ] + } + (peerAction, peerState', sharedState') = + nextPeerAction testNow policy peeraddr peerState sharedState + +-- Verifies that nextPeerAction submits buffered owned txs before +-- acknowledging their txids. 
+prop_nextPeerAction_ownerSubmitsBuffered + :: Positive Int + -> TxId + -> Positive Int + -> Property +prop_nextPeerAction_ownerSubmitsBuffered (Positive peeraddr) txid0 txSize0 = + case peerAction of + PeerSubmitTxs [txKey] -> + conjoin + [ txKey === key + , peerState' === peerState0 + , sharedState' === sharedState0 + ] + _ -> counterexample ("unexpected peer action: " ++ show peerAction) False + where + txid = abs txid0 + 1 + txSize = mkSize txSize0 + tx = mkTx txid txSize + key = TxKey 0 + k = unTxKey key + sharedState0 = emptySharedTxState + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + , sharedTxTable = IntMap.singleton k TxEntry + { txLease = TxClaimable + , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenBuffered txSize) + , txTieBreakSalt = txid + , txAttempts = Map.singleton peeraddr TxBuffered + } + , sharedTxIdToKey = Map.singleton txid key + , sharedKeyToTxId = IntMap.singleton k txid + , sharedNextTxKey = 1 + } + peerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton key + , peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy + , peerDownloadedTxs = IntMap.singleton k tx + } + (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + +-- Verifies that a blocked buffered tx does not prevent the peer from +-- requesting a different claimable tx body. 
+unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx :: (String -> IO ()) -> Assertion +unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx step = do + step "Run nextPeerAction with one blocked buffered tx and one claimable tx" + case peerAction of + PeerRequestTxs [TxKey requested] -> do + step "Assert the blocked tx stays buffered while the claimable tx is requested" + requested @?= kClaimable + peerUnacknowledgedTxIds peerState' @?= peerUnacknowledgedTxIds peerState0 + peerRequestedTxs peerState' @?= IntSet.singleton kClaimable + peerDownloadedTxs peerState' @?= peerDownloadedTxs peerState0 + txAttempts (lookupEntryOrFail blockedKey sharedState') @?= txAttempts blockedEntry + txLease (lookupEntryOrFail claimableKey sharedState') @?= + TxLeased peeraddr (addTime (interTxSpace defaultTxDecisionPolicy) now) + other -> + assertFailure ("unexpected action: " ++ show other) + where + peeraddr = 7 + submittingPeer = 8 + blockedTxid = 1 + claimableTxid = 2 + blockedSize = mkSize (Positive 10) + claimableSize = mkSize (Positive 11) + blockedKey = TxKey 1 + claimableKey = TxKey 2 + kBlocked = unTxKey blockedKey + kClaimable = unTxKey claimableKey + blockedTx = mkTx blockedTxid blockedSize + blockedEntry = TxEntry + { txLease = TxLeased peeraddr (addTime 10 now) + , txAdvertisers = mkAdvertisers blockedSize + [ (peeraddr, AckWhenBuffered) + , (submittingPeer, AckWhenResolved) + ] + , txTieBreakSalt = blockedTxid + , txAttempts = Map.fromList + [ (peeraddr, TxBuffered) + , (submittingPeer, TxSubmitting) + ] + } + sharedState0 = emptySharedTxState + { sharedPeers = Map.fromList + [ (peeraddr, mkSharedPeerState PeerIdle (emptyPeerScore now)) + , (submittingPeer, mkSharedPeerState PeerSubmittingToMempool (emptyPeerScore now)) + ] + , sharedTxTable = IntMap.fromList + [ (kBlocked, blockedEntry) + , (kClaimable, TxEntry + { txLease = TxClaimable + , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved claimableSize) + , txTieBreakSalt = 
claimableTxid + , txAttempts = Map.empty + }) + ] + , sharedTxIdToKey = Map.fromList + [ (blockedTxid, blockedKey) + , (claimableTxid, claimableKey) + ] + , sharedKeyToTxId = IntMap.fromList + [ (kBlocked, blockedTxid) + , (kClaimable, claimableTxid) + ] + , sharedNextTxKey = 3 + } + peerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.fromList [blockedKey, claimableKey] + , peerAvailableTxIds = IntMap.fromList + [ (kBlocked, blockedSize) + , (kClaimable, claimableSize) + ] + , peerDownloadedTxs = IntMap.singleton kBlocked blockedTx + } + (peerAction, peerState', sharedState') = + nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + +-- Verifies that txid acknowledgements stop before a blocked buffered tx, so +-- earlier safe txids can still be acked and replaced with new txid requests. +unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx :: (String -> IO ()) -> Assertion +unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx step = do + step "Run nextPeerAction with an ackable retained tx followed by a blocked buffered tx" + case peerAction of + PeerRequestTxIds txIdsToAcknowledge txIdsToReq -> do + step "Assert only the safe prefix is acknowledged" + txIdsToAcknowledge @?= 1 + assertBool ("expected positive txIdsToReq, got " ++ show txIdsToReq) (txIdsToReq > 0) + peerUnacknowledgedTxIds peerState' @?= StrictSeq.singleton blockedKey + peerRequestedTxIds peerState' @?= txIdsToReq + txAdvertisers (lookupEntryOrFail blockedKey sharedState') @?= txAdvertisers blockedEntry + other -> + assertFailure ("unexpected action: " ++ show other) + where + peeraddr = 7 + submittingPeer = 8 + resolvedTxid = 1 + blockedTxid = 2 + blockedSize = mkSize (Positive 10) + resolvedKey = TxKey 1 + blockedKey = TxKey 2 + kResolved = unTxKey resolvedKey + kBlocked = unTxKey blockedKey + blockedTx = mkTx blockedTxid blockedSize + blockedEntry = TxEntry + { txLease = TxLeased peeraddr (addTime 10 now) + , txAdvertisers = mkAdvertisers 
blockedSize + [ (peeraddr, AckWhenBuffered) + , (submittingPeer, AckWhenResolved) + ] + , txTieBreakSalt = blockedTxid + , txAttempts = Map.fromList + [ (peeraddr, TxBuffered) + , (submittingPeer, TxSubmitting) + ] + } + sharedState0 = emptySharedTxState + { sharedPeers = Map.fromList + [ (peeraddr, mkSharedPeerState PeerIdle (emptyPeerScore now)) + , (submittingPeer, mkSharedPeerState PeerSubmittingToMempool (emptyPeerScore now)) + ] + , sharedTxTable = IntMap.singleton kBlocked blockedEntry + , sharedRetainedTxs = retainedSingleton kResolved (addTime 17 now) + , sharedTxIdToKey = Map.fromList + [ (resolvedTxid, resolvedKey) + , (blockedTxid, blockedKey) + ] + , sharedKeyToTxId = IntMap.fromList + [ (kResolved, resolvedTxid) + , (kBlocked, blockedTxid) + ] + , sharedNextTxKey = 3 + } + peerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.fromList [resolvedKey, blockedKey] + , peerDownloadedTxs = IntMap.singleton kBlocked blockedTx + } + (peerAction, peerState', sharedState') = + nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + +-- Verifies that nextPeerAction keeps non-owner txids unacknowledged until +-- the tx has resolved out of the active table. 
+prop_nextPeerAction_nonOwnerWaitsUntilResolved + :: Positive Int + -> Positive Int + -> TxId + -> Positive Int + -> Property +prop_nextPeerAction_nonOwnerWaitsUntilResolved (Positive owner0) (Positive peeraddr0) txid0 txSize0 = + owner /= peeraddr ==> + conjoin + [ case unresolvedAction of + PeerDoNothing _ _ -> + unresolvedExpectations + PeerRequestTxIds txIdsToAcknowledge _ -> + conjoin + [ txIdsToAcknowledge === 0 + , unresolvedExpectations + ] + _ -> counterexample ("unexpected unresolved action: " ++ show unresolvedAction) False + , case resolvedAction of + PeerRequestTxIds txIdsToAcknowledge _ -> + conjoin + [ txIdsToAcknowledge === 1 + , peerUnacknowledgedTxIds resolvedPeerState' === StrictSeq.empty + ] + _ -> counterexample ("unexpected resolved action: " ++ show resolvedAction) False ] - ++ - [ ArbCollectTxs mempoolHasTx - (requestedTxs - `Map.restrictKeys` unacked - `Map.restrictKeys` inflightTxSet) - (receivedTxs - `Map.restrictKeys` unacked - `Map.restrictKeys` inflightTxSet) - peeraddr ps - st' - | let unacked = Set.fromList - . toList - . 
unacknowledgedTxIds - $ ps - , st'@SharedTxState { inflightTxs } <- shrinkSharedTxState (apply mempoolHasTx) st - , let inflightTxSet = Map.keysSet inflightTxs - , peeraddr `Map.member` peerTxStates st' - , st' /= st + where + owner = owner0 + 1000 + peeraddr = peeraddr0 + 2000 + txid = abs txid0 + 1 + txSize = mkSize txSize0 + key = TxKey 0 + k = unTxKey key + sharedPeers0 = Map.fromList + [ (owner, mkSharedPeerState PeerIdle (emptyPeerScore now)) + , (peeraddr, mkSharedPeerState PeerIdle (emptyPeerScore now)) ] - - -prop_collectTxs_generator - :: ArbCollectTxs + txAdvertisers0 = mkAdvertisers txSize [(owner, AckWhenBuffered), (peeraddr, AckWhenResolved)] + unresolvedSharedState = emptySharedTxState + { sharedPeers = sharedPeers0 + , sharedTxTable = IntMap.singleton k TxEntry + { txLease = TxClaimable + , txAdvertisers = txAdvertisers0 + , txTieBreakSalt = txid + , txAttempts = Map.singleton owner TxBuffered + } + , sharedTxIdToKey = Map.singleton txid key + , sharedKeyToTxId = IntMap.singleton k txid + , sharedNextTxKey = 1 + } + resolvedSharedState = unresolvedSharedState + { sharedTxTable = IntMap.empty + , sharedRetainedTxs = retainedSingleton k (addTime 17 now) + } + peerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton key + , peerRequestedTxIds = 0 + } + (unresolvedAction, unresolvedPeerState', unresolvedSharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 unresolvedSharedState + unresolvedExpectations = + conjoin + [ peerUnacknowledgedTxIds unresolvedPeerState' === peerUnacknowledgedTxIds peerState0 + , txAdvertisers (lookupEntryOrFail key unresolvedSharedState') === txAdvertisers (lookupEntryOrFail key unresolvedSharedState) + ] + (resolvedAction, resolvedPeerState', _) = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 resolvedSharedState + +-- Verifies that nextPeerActionPipelined does nothing when it can only +-- acknowledge txids and cannot request new ones in the same step. 
+prop_nextPeerActionPipelined_requiresAckAndReq + :: Positive Int + -> TxId + -> Positive Int -> Property -prop_collectTxs_generator (ArbCollectTxs _ requestedTxIds receivedTxs peeraddr - ps@PeerTxState { availableTxIds, - requestedTxsInflightSize } - st) = - counterexample "size of requested txs must not be larger than requestedTxsInflightSize" - (requestedSize <= requestedTxsInflightSize) - .&&. counterexample ("receivedTxs must be a subset of requestedTxIds " - ++ show (Map.keysSet receivedTxs Set.\\ requestedTxIdsSet)) - (Map.keysSet receivedTxs `Set.isSubsetOf` requestedTxIdsSet) - .&&. counterexample "peerTxState" - (Map.lookup peeraddr (peerTxStates st) === Just ps) - .&&. -- advertised sizes should agree with `getTxAdvSize of received txs. - property ( foldMap Every - $ Map.intersectionWith - (\advSize Tx {getTxId, getTxAdvSize} -> - counterexample (show getTxId) - $ advSize === getTxAdvSize) - requestedTxIds - receivedTxs - ) +prop_nextPeerActionPipelined_requiresAckAndReq (Positive peeraddr) txid0 _txSize0 = + case peerAction of + PeerDoNothing _ _ -> + conjoin + [ peerUnacknowledgedTxIds peerState' === peerUnacknowledgedTxIds peerState0 + , sharedState' === sharedState0 + ] + _ -> counterexample ("unexpected pipelined action: " ++ show peerAction) False where - requestedTxIdsSet = Map.keysSet requestedTxIds - requestedSize = fold (availableTxIds `Map.restrictKeys` requestedTxIdsSet) - - -prop_collectTxs_shrinker - :: Fixed ArbCollectTxs - -- ^ disabled shrinking + txid = abs txid0 + 1 + key = TxKey 0 + k = unTxKey key + peerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton key + , peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy + } + sharedState0 = emptySharedTxState + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + , sharedRetainedTxs = retainedSingleton k (addTime 17 now) + , sharedTxIdToKey = Map.singleton txid key + , sharedKeyToTxId = IntMap.singleton k txid 
+ , sharedNextTxKey = 1 + } + (peerAction, peerState', sharedState') = nextPeerActionPipelined now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + +-- Verifies that nextPeerActionPipelined requests txids once it can both +-- acknowledge old txids and ask for more. +prop_nextPeerActionPipelined_requestsTxIds + :: Positive Int + -> TxId + -> Positive Int -> Property -prop_collectTxs_shrinker (Fixed txs) = - property $ foldMap (\a@(ArbCollectTxs _ _ _ _ _ st) -> - Every . counterexample (show st) $ - f a =/= f txs - .&&. sharedTxStateInvariant StrongInvariant st - ) (shrink txs) +prop_nextPeerActionPipelined_requestsTxIds (Positive peeraddr) txid0 _txSize0 = + case peerAction of + PeerRequestTxIds txIdsToAcknowledge txIdsToReq -> + conjoin + [ txIdsToAcknowledge === 1 + , counterexample ("expected positive txIdsToReq, got " ++ show txIdsToReq) (txIdsToReq > 0) + , peerUnacknowledgedTxIds peerState' === StrictSeq.empty + , peerRequestedTxIds peerState' === txIdsToReq + , sharedRetainedTxs sharedState' === sharedRetainedTxs sharedState0 + , sharedTxTable sharedState' === sharedTxTable sharedState0 + , sharedTxIdToKey sharedState' === sharedTxIdToKey sharedState0 + , sharedKeyToTxId sharedState' === sharedKeyToTxId sharedState0 + , sharedGeneration sharedState' === sharedGeneration sharedState0 + 1 + ] + _ -> counterexample ("unexpected pipelined action: " ++ show peerAction) False where - f (ArbCollectTxs _ reqSet recvMap peeraddr ps st) = (reqSet, recvMap, peeraddr, ps, st) - + txid = abs txid0 + 1 + key = TxKey 0 + k = unTxKey key + peerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton key + , peerRequestedTxIds = 0 + } + sharedState0 = emptySharedTxState + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + , sharedRetainedTxs = retainedSingleton k (addTime 17 now) + , sharedTxIdToKey = Map.singleton txid key + , sharedKeyToTxId = IntMap.singleton k txid + , sharedNextTxKey = 1 + } + 
(peerAction, peerState', sharedState') = nextPeerActionPipelined now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + +-- Verifies that nextPeerActionPipelined opens a second outstanding body +-- batch when another downloadable tx is available. +prop_nextPeerActionPipelined_secondBodyBatch + :: Positive Int + -> TxId + -> TxId + -> Positive Int + -> Positive Int + -> Property +prop_nextPeerActionPipelined_secondBodyBatch (Positive peeraddr) txidA0 txidB0 txSizeA0 txSizeB0 = + txidA /= txidB ==> + case peerAction of + PeerRequestTxs [txKey] -> + conjoin + [ txKey === keyB + , peerRequestedTxs peerState' === IntSet.fromList [kA, kB] + , StrictSeq.length (peerRequestedTxBatches peerState') === 2 + , peerRequestedTxsSize peerState' === txSizeA + txSizeB + , fmap txLease (IntMap.lookup kB (sharedTxTable sharedState')) === + Just (TxLeased peeraddr (addTime (interTxSpace defaultTxDecisionPolicy) now)) + , fmap (Map.lookup peeraddr . txAttempts) + (IntMap.lookup kB (sharedTxTable sharedState')) === + Just (Just TxDownloading) + ] + _ -> counterexample ("unexpected pipelined action: " ++ show peerAction) False + where + txidA = abs txidA0 + 1 + txidB = abs txidB0 + 2 + txSizeA = mkSize txSizeA0 + txSizeB = mkSize txSizeB0 + keyA = TxKey 0 + keyB = TxKey 1 + kA = unTxKey keyA + kB = unTxKey keyB + peerState0 = emptyPeerTxLocalState + { peerAvailableTxIds = IntMap.singleton kB txSizeB + , peerRequestedTxs = IntSet.singleton kA + , peerRequestedTxBatches = StrictSeq.singleton (mkRequestedTxBatch [keyA] txSizeA) + , peerRequestedTxsSize = txSizeA + } + sharedState0 = emptySharedTxState + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + , sharedTxTable = IntMap.fromList + [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading)) + , (kB, TxEntry + { txLease = TxClaimable + , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved txSizeB) + , txTieBreakSalt = txidB + , txAttempts = Map.empty + }) + ] + , 
sharedTxIdToKey = Map.fromList [(txidA, keyA), (txidB, keyB)] + , sharedKeyToTxId = IntMap.fromList [(kA, txidA), (kB, txidB)] + , sharedNextTxKey = 2 + } + (peerAction, peerState', sharedState') = nextPeerActionPipelined now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + +-- Verifies that nextPeerActionPipelined does not open a third outstanding +-- body batch once the per-peer batch limit is reached. +prop_nextPeerActionPipelined_noThirdBodyBatch + :: Positive Int + -> TxId + -> TxId + -> TxId + -> Positive Int + -> Positive Int + -> Positive Int + -> Property +prop_nextPeerActionPipelined_noThirdBodyBatch (Positive peeraddr) txidA0 txidB0 txidC0 txSizeA0 txSizeB0 txSizeC0 = + distinctTxIds ==> + case peerAction of + PeerDoNothing _ _ -> + conjoin + [ peerRequestedTxs peerState' === peerRequestedTxs peerState0 + , peerRequestedTxBatches peerState' === peerRequestedTxBatches peerState0 + , peerRequestedTxsSize peerState' === peerRequestedTxsSize peerState0 + , sharedState' === sharedState0 + ] + _ -> counterexample ("unexpected pipelined action: " ++ show peerAction) False + where + txidA = abs txidA0 + 1 + txidB = abs txidB0 + 2 + txidC = abs txidC0 + 3 + distinctTxIds = length (nub [txidA, txidB, txidC]) == 3 + txSizeA = mkSize txSizeA0 + txSizeB = mkSize txSizeB0 + txSizeC = mkSize txSizeC0 + keyA = TxKey 0 + keyB = TxKey 1 + keyC = TxKey 2 + kA = unTxKey keyA + kB = unTxKey keyB + kC = unTxKey keyC + peerState0 = emptyPeerTxLocalState + { peerAvailableTxIds = IntMap.singleton kC txSizeC + , peerRequestedTxs = IntSet.fromList [kA, kB] + , peerRequestedTxBatches = StrictSeq.fromList + [ mkRequestedTxBatch [keyA] txSizeA + , mkRequestedTxBatch [keyB] txSizeB + ] + , peerRequestedTxsSize = txSizeA + txSizeB + } + sharedState0 = emptySharedTxState + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + , sharedTxTable = IntMap.fromList + [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading)) + , (kB, mkTxEntry 
peeraddr txSizeB (Just TxDownloading)) + , (kC, TxEntry + { txLease = TxClaimable + , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved txSizeC) + , txTieBreakSalt = txidC + , txAttempts = Map.empty + }) + ] + , sharedTxIdToKey = Map.fromList [(txidA, keyA), (txidB, keyB), (txidC, keyC)] + , sharedKeyToTxId = IntMap.fromList [(kA, txidA), (kB, txidB), (kC, txidC)] + , sharedNextTxKey = 3 + } + (peerAction, peerState', sharedState') = nextPeerActionPipelined now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + +-- Verifies that nextPeerAction prunes expired retained txs and removes their +-- tx-key mappings while the peer is idle. +prop_nextPeerAction_prunesExpiredRetained + :: Positive Int + -> TxId + -> Positive Int + -> Property +prop_nextPeerAction_prunesExpiredRetained (Positive peeraddr) txid0 _txSize0 = + case peerAction of + PeerDoNothing _ Nothing -> + conjoin + [ peerState' === idlePeerState + , IntMap.lookup k (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) + , retainedLookup k (sharedRetainedTxs sharedState') === Nothing + , Map.lookup txid (sharedTxIdToKey sharedState') === Nothing + , IntMap.lookup k (sharedKeyToTxId sharedState') === Nothing + ] + _ -> counterexample ("unexpected peer action: " ++ show peerAction) False + where + txid = abs txid0 + 1 + key = TxKey 0 + k = unTxKey key + idlePeerState :: PeerTxLocalState (Tx TxId) + idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy } + sharedState0 = emptySharedTxState + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + , sharedRetainedTxs = retainedSingleton k now + , sharedTxIdToKey = Map.singleton txid key + , sharedKeyToTxId = IntMap.singleton k txid + , sharedNextTxKey = max 1 (k + 1) + } + (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr idlePeerState sharedState0 + +-- Verifies that nextPeerAction keeps 
unexpired retained txs and returns the +-- wake delay until their expiry. +prop_nextPeerAction_keepsRetained + :: Positive Int + -> TxId + -> Positive Int + -> Property +prop_nextPeerAction_keepsRetained (Positive peeraddr) txid0 _txSize0 = + case peerAction of + PeerDoNothing _ (Just wakeDelay) -> + conjoin + [ peerState' === idlePeerState + , IntMap.lookup k (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) + , retainedLookup k (sharedRetainedTxs sharedState') === Just retainUntil + , Map.lookup txid (sharedTxIdToKey sharedState') === Just key + , IntMap.lookup k (sharedKeyToTxId sharedState') === Just txid + , wakeDelay === diffTime retainUntil now + ] + _ -> counterexample ("unexpected peer action: " ++ show peerAction) False + where + txid = abs txid0 + 1 + retainUntil = addTime 17 now + key = TxKey 0 + k = unTxKey key + idlePeerState :: PeerTxLocalState (Tx TxId) + idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy } + sharedState0 = emptySharedTxState + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + , sharedRetainedTxs = retainedSingleton k retainUntil + , sharedTxIdToKey = Map.singleton txid key + , sharedKeyToTxId = IntMap.singleton k txid + , sharedNextTxKey = 1 + } + (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr idlePeerState sharedState0 + +-- Verifies that PeerDoNothing waits until the earliest shared expiry, whether +-- it comes from a lease or a retained tx. 
+prop_nextPeerAction_earliestWakeDelay + :: Positive Int + -> Positive Int + -> TxId + -> TxId + -> Positive Int + -> Positive Int + -> Property +prop_nextPeerAction_earliestWakeDelay (Positive peeraddr) (Positive owner0) txidA0 txidB0 txSizeA0 _txSizeB0 = + peeraddr /= owner ==> + conjoin + [ case leaseFirstAction of + PeerDoNothing _ (Just wakeDelay) -> wakeDelay === diffTime leaseUntil now + _ -> counterexample ("unexpected lease-first action: " ++ show leaseFirstAction) False + , case retainFirstAction of + PeerDoNothing _ (Just wakeDelay) -> wakeDelay === diffTime retainUntilSoon now + _ -> counterexample ("unexpected retain-first action: " ++ show retainFirstAction) False + ] + where + owner = owner0 + 1000 + txidA = abs txidA0 + 1 + txidB = abs txidB0 + 2 + txSizeA = mkSize txSizeA0 + leaseUntil = addTime 11 now + retainUntilLater = addTime 29 now + leaseUntilLater = addTime 31 now + retainUntilSoon = addTime 13 now + keyA = TxKey 0 + keyB = TxKey 1 + idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy } + sharedPeers0 = Map.fromList + [ (peeraddr, mkSharedPeerState PeerIdle (emptyPeerScore now)) + , (owner, mkSharedPeerState PeerWaitingTxs (emptyPeerScore now)) + ] + leaseFirstState = emptySharedTxState + { sharedPeers = sharedPeers0 + , sharedTxTable = IntMap.singleton (unTxKey keyA) TxEntry + { txLease = TxLeased owner leaseUntil + , txAdvertisers = mkAdvertisers txSizeA [(owner, AckWhenBuffered), (peeraddr, AckWhenResolved)] + , txTieBreakSalt = txidA + , txAttempts = Map.empty + } + , sharedRetainedTxs = retainedSingleton (unTxKey keyB) retainUntilLater + , sharedTxIdToKey = Map.fromList [(txidA, keyA), (txidB, keyB)] + , sharedKeyToTxId = IntMap.fromList [(unTxKey keyA, txidA), (unTxKey keyB, txidB)] + , sharedNextTxKey = 2 + } + retainFirstState = emptySharedTxState + { sharedPeers = sharedPeers0 + , sharedTxTable = IntMap.singleton (unTxKey keyA) TxEntry + { txLease = TxLeased owner 
leaseUntilLater + , txAdvertisers = mkAdvertisers txSizeA [(owner, AckWhenBuffered), (peeraddr, AckWhenResolved)] + , txTieBreakSalt = txidA + , txAttempts = Map.empty + } + , sharedRetainedTxs = retainedSingleton (unTxKey keyB) retainUntilSoon + , sharedTxIdToKey = Map.fromList [(txidA, keyA), (txidB, keyB)] + , sharedKeyToTxId = IntMap.fromList [(unTxKey keyA, txidA), (unTxKey keyB, txidB)] + , sharedNextTxKey = 2 + } + (leaseFirstAction, _, _) = nextPeerAction now defaultTxDecisionPolicy peeraddr idlePeerState leaseFirstState + (retainFirstAction, _, _) = nextPeerAction now defaultTxDecisionPolicy peeraddr idlePeerState retainFirstState --- | Verify `collectTxsImpl` properties: --- --- * verify `SharedTxState` invariant; --- * unacknowledged txids after `collectTxsImpl` must be a suffix of the --- original ones; --- * progress property: we acknowledge as many `txid`s as possible --- -prop_collectTxsImpl - :: ArbCollectTxs +-- Verifies that PeerDoNothing reports the current generation of the acting +-- peer. +prop_nextPeerAction_returnsPeerGeneration + :: Positive Int -> Property -prop_collectTxsImpl (ArbCollectTxs _mempoolHasTxFun txidsRequested txsReceived peeraddr ps st) = - - label ("number of txids inflight " ++ labelInt 25 5 (Map.size $ inflightTxs st)) $ - label ("number of txids requested " ++ labelInt 25 5 (Map.size txidsRequested)) $ - label ("number of txids received " ++ labelInt 10 2 (Map.size txsReceived)) $ - label ("hasTxSizeError " ++ show hasTxSizeErr) $ - - case TXS.collectTxsImpl getTxSize peeraddr txidsRequested txsReceived st of - Right st' | not hasTxSizeErr -> - let ps' = peerTxStates st' Map.! peeraddr in - -- InboundState invariant - counterexample - ( "InboundState invariant violation:\n" ++ show st' ++ "\n" - ++ show ps' - ) - (sharedTxStateInvariant StrongInvariant st') - - .&&. 
- -- `collectTxsImpl` doesn't modify unacknowledged TxId's - counterexample "acknowledged property violation" - ( let unacked = toList $ unacknowledgedTxIds ps - unacked' = toList $ unacknowledgedTxIds ps' - in unacked === unacked' - ) - - Right _ -> - counterexample "collectTxsImpl should return Left" - . counterexample (show txsReceived) - $ False - Left e | not hasTxSizeErr -> - counterexample "collectTxsImpl should return Right" $ - counterexample (show e) False - - Left (TXS.ProtocolErrorTxSizeError as) -> - counterexample (show as) - $ Set.fromList ((\(txid, _, _) -> coerceTxId txid) `map` as) - === - Map.keysSet (Map.filter (\tx -> getTxSize tx /= getTxAdvSize tx) txsReceived) - Left e -> - counterexample ("unexpected error: " ++ show e) False +prop_nextPeerAction_returnsPeerGeneration (Positive peeraddr) = + case peerAction of + PeerDoNothing generation Nothing -> generation === expectedGeneration + _ -> counterexample ("unexpected peer action: " ++ show peerAction) False + where + expectedGeneration = 7 + sharedState0 :: SharedTxState PeerAddr TxId + sharedState0 = emptySharedTxState + { sharedPeers = Map.fromList + [ ( peeraddr + , (mkSharedPeerState PeerIdle (emptyPeerScore now)) + { sharedPeerGeneration = expectedGeneration + } + ) + , ( peeraddr + 1000 + , (mkSharedPeerState PeerIdle (emptyPeerScore now)) + { sharedPeerGeneration = 11 + } + ) + ] + } + peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy } + (peerAction, _, _) = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + +-- Verifies that handleSubmittedTxs bumps idle advertisers while leaving +-- submitting and waiting advertisers unchanged. 
+prop_handleSubmittedTxs_bumpsIdleAdvertisers + :: Positive Int + -> Positive Int + -> Positive Int + -> TxId + -> Positive Int + -> Property +prop_handleSubmittedTxs_bumpsIdleAdvertisers (Positive owner0) (Positive peerA0) (Positive peerB0) txid0 txSize0 = + owner /= peerA && owner /= peerB && peerA /= peerB ==> + conjoin + [ sharedPeerGeneration (lookupPeerOrFail peerA sharedState') === 1 + , sharedPeerGeneration (lookupPeerOrFail peerB sharedState') === 0 + , sharedPeerGeneration (lookupPeerOrFail owner sharedState') === 0 + ] + where + owner = owner0 + 1000 + peerA = peerA0 + 2000 + peerB = peerB0 + 3000 + txid = abs txid0 + 1 + txSize = mkSize txSize0 + tx = mkTx txid txSize + key = TxKey 0 + k = unTxKey key + sharedState0 = emptySharedTxState + { sharedPeers = Map.fromList + [ (owner, mkSharedPeerState PeerSubmittingToMempool (emptyPeerScore now)) + , (peerA, mkSharedPeerState PeerIdle (emptyPeerScore now)) + , (peerB, mkSharedPeerState PeerWaitingTxs (emptyPeerScore now)) + ] + , sharedTxTable = IntMap.singleton k TxEntry + { txLease = TxLeased owner (addTime 10 now) + , txAdvertisers = mkAdvertisers txSize + [ (owner, AckWhenBuffered) + , (peerA, AckWhenResolved) + , (peerB, AckWhenResolved) + ] + , txTieBreakSalt = txid + , txAttempts = Map.singleton owner TxBuffered + } + , sharedTxIdToKey = Map.singleton txid key + , sharedKeyToTxId = IntMap.singleton k txid + , sharedNextTxKey = 1 + } + peerState0 = emptyPeerTxLocalState { peerDownloadedTxs = IntMap.singleton k tx } + (_, sharedState') = handleSubmittedTxs now defaultTxDecisionPolicy owner [key] [] peerState0 sharedState0 + +unit_updatePeerPhase_wakesOnlyBecomingIdlePeer :: (String -> IO ()) -> Assertion +unit_updatePeerPhase_wakesOnlyBecomingIdlePeer step = do + step "Update a peer from waiting to idle" + sharedPeerPhase (lookupPeerOrFail peer sharedState') @?= PeerIdle + step "Assert only the becoming-idle peer generation changes" + sharedPeerGeneration (lookupPeerOrFail peer sharedState') @?= 6 + 
sharedPeerGeneration (lookupPeerOrFail other sharedState') @?= 11 + where + peer = 1 + other = 2 + sharedState0 = emptySharedTxState + { sharedPeers = Map.fromList + [ (peer, (mkSharedPeerState PeerWaitingTxs (emptyPeerScore now)) { sharedPeerGeneration = 5 }) + , (other, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 11 }) + ] + } + sharedState' = updatePeerPhase peer PeerIdle sharedState0 + +unit_updatePeerPhase_wakesCompetingAdvertisers :: (String -> IO ()) -> Assertion +unit_updatePeerPhase_wakesCompetingAdvertisers step = do + step "Update an idle peer to a waiting phase" + sharedPeerPhase (lookupPeerOrFail leavingPeer sharedState') @?= PeerWaitingTxs + step "Assert competing idle advertisers are woken but unrelated peers are not" + sharedPeerGeneration (lookupPeerOrFail leavingPeer sharedState') @?= 5 + sharedPeerGeneration (lookupPeerOrFail competingPeer sharedState') @?= 12 + sharedPeerGeneration (lookupPeerOrFail unrelatedPeer sharedState') @?= 17 + where + leavingPeer = 1 + competingPeer = 2 + unrelatedPeer = 3 + txid = 1 + txSize = 10 + baseState = mkSharedState [txid] + key = lookupKeyOrFail txid baseState + k = unTxKey key + sharedState0 = baseState + { sharedPeers = Map.fromList + [ (leavingPeer, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 5 }) + , (competingPeer, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 11 }) + , (unrelatedPeer, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 17 }) + ] + , sharedTxTable = IntMap.singleton k TxEntry + { txLease = TxClaimable + , txAdvertisers = mkAdvertisers txSize + [ (leavingPeer, AckWhenResolved) + , (competingPeer, AckWhenResolved) + ] + , txTieBreakSalt = txid + , txAttempts = Map.empty + } + } + sharedState' = updatePeerPhase leavingPeer PeerWaitingTxs sharedState0 + +unit_updatePeerRequestedTxs_ignoresSaturation :: (String -> IO ()) -> Assertion +unit_updatePeerRequestedTxs_ignoresSaturation 
step = do + step "Update requested-tx counters across the saturation threshold" + sharedPeerRequestedTxBatches (lookupPeerOrFail owner sharedState') @?= 2 + step "Assert saturation no longer wakes competing advertisers" + sharedPeerGeneration (lookupPeerOrFail owner sharedState') @?= 3 + sharedPeerGeneration (lookupPeerOrFail competingPeer sharedState') @?= 7 + sharedPeerGeneration (lookupPeerOrFail unrelatedPeer sharedState') @?= 13 + where + owner = 1 + competingPeer = 2 + unrelatedPeer = 3 + txid = 1 + txSize = 10 + policy = + defaultTxDecisionPolicy { + maxOutstandingTxBatchesPerPeer = 2 + } + baseState = mkSharedState [txid] + key = lookupKeyOrFail txid baseState + k = unTxKey key + sharedState0 = baseState + { sharedPeers = Map.fromList + [ (owner, (mkSharedPeerState PeerWaitingTxs (emptyPeerScore now)) { + sharedPeerGeneration = 3, + sharedPeerRequestedTxBatches = 1, + sharedPeerRequestedTxsSize = txSize + }) + , (competingPeer, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 7 }) + , (unrelatedPeer, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 13 }) + ] + , sharedTxTable = IntMap.singleton k TxEntry + { txLease = TxLeased owner (addTime 10 now) + , txAdvertisers = mkAdvertisers txSize + [ (owner, AckWhenBuffered) + , (competingPeer, AckWhenResolved) + ] + , txTieBreakSalt = txid + , txAttempts = Map.empty + } + } + peerState' = emptyPeerTxLocalState + { peerRequestedTxBatches = + StrictSeq.fromList + [ mkRequestedTxBatch [key] txSize + , mkRequestedTxBatch [TxKey 42] txSize + ] + , peerRequestedTxsSize = txSize * 2 + } + sharedState' = updatePeerRequestedTxs policy owner peerState' sharedState0 + +unit_updatePeerRequestedTxs_ignoresUnsaturatedChanges :: (String -> IO ()) -> Assertion +unit_updatePeerRequestedTxs_ignoresUnsaturatedChanges step = do + step "Update requested-tx counters without saturating the owner" + sharedPeerRequestedTxBatches (lookupPeerOrFail owner sharedState') @?= 2 + step "Assert 
unsaturated inflight changes do not wake any peer" + sharedPeerGeneration (lookupPeerOrFail owner sharedState') @?= 3 + sharedPeerGeneration (lookupPeerOrFail competingPeer sharedState') @?= 7 + sharedPeerGeneration (lookupPeerOrFail unrelatedPeer sharedState') @?= 13 where - hasTxSizeErr = any (\tx -> abs (getTxSize tx - getTxAdvSize tx) > TXS.const_MAX_TX_SIZE_DISCREPANCY) txsReceived + owner = 1 + competingPeer = 2 + unrelatedPeer = 3 + txid = 1 + txSize = 10 + policy = + defaultTxDecisionPolicy { + maxOutstandingTxBatchesPerPeer = 3 + } + baseState = mkSharedState [txid] + key = lookupKeyOrFail txid baseState + k = unTxKey key + sharedState0 = baseState + { sharedPeers = Map.fromList + [ (owner, (mkSharedPeerState PeerWaitingTxs (emptyPeerScore now)) { + sharedPeerGeneration = 3, + sharedPeerRequestedTxBatches = 1, + sharedPeerRequestedTxsSize = txSize + }) + , (competingPeer, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 7 }) + , (unrelatedPeer, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 13 }) + ] + , sharedTxTable = IntMap.singleton k TxEntry + { txLease = TxLeased owner (addTime 10 now) + , txAdvertisers = mkAdvertisers txSize + [ (owner, AckWhenBuffered) + , (competingPeer, AckWhenResolved) + ] + , txTieBreakSalt = txid + , txAttempts = Map.empty + } + } + peerState' = emptyPeerTxLocalState + { peerRequestedTxBatches = + StrictSeq.fromList + [ mkRequestedTxBatch [key] txSize + , mkRequestedTxBatch [TxKey 42] txSize + ] + , peerRequestedTxsSize = txSize * 2 + } + sharedState' = updatePeerRequestedTxs policy owner peerState' sharedState0 + +-- Generate a peer state that respects phase-specific inflight fields. 
+genSharedPeerState :: Gen SharedPeerState +genSharedPeerState = do + sharedPeerPhase <- elements [PeerIdle, PeerWaitingTxIds, PeerWaitingTxs, PeerSubmittingToMempool] + peerScoreValue <- choose (0 :: Double, 100) + peerScoreTs <- genSmallTime + sharedPeerGeneration <- genSmallWord64 + (sharedPeerRequestedTxBatches, sharedPeerRequestedTxsSize) <- + case sharedPeerPhase of + PeerWaitingTxs -> do + batches <- chooseInt (1, 3) + totalSize <- genPositiveSize + pure (batches, totalSize) + _ -> + pure (0, 0) + pure SharedPeerState { + sharedPeerPhase, + sharedPeerScore = PeerScore peerScoreValue peerScoreTs, + sharedPeerGeneration, + sharedPeerRequestedTxBatches, + sharedPeerRequestedTxsSize + } - -- The `ProtocolErrorTxSizeError` type is an existential type. We know that - -- the type of `txid` is `TxId`, we just don't have evidence for it. - coerceTxId :: Typeable txid => txid -> TxId - coerceTxId txid = case cast txid of - Just a -> a - Nothing -> error "impossible happened! Is the test still using `TxId` for `txid`?" +-- Generate a self-consistent local peer view of requested, available, and downloaded txs. +genPeerTxLocalState :: Gen (PeerTxLocalState (Tx TxId)) +genPeerTxLocalState = sized $ \n -> do + let maxKeys = min 12 (n + 2) + + numKeys <- chooseInt (0, maxKeys) + peerRequestedTxIds <- fromIntegral <$> chooseInt (0, min 8 (n + 1)) + peerUnacknowledgedTxIds <- StrictSeq.fromList <$> shuffle [ TxKey key | key <- [0 .. 
numKeys - 1] ] + + downloadedKeys <- sublistOf (toList peerUnacknowledgedTxIds) + let downloadedSet = + IntSet.fromList [ unTxKey key | key <- downloadedKeys ] + remainingKeys = + [ key + | key <- toList peerUnacknowledgedTxIds + , not (IntSet.member (unTxKey key) downloadedSet) + ] + requestedKeys <- sublistOf remainingKeys + let requestedSet = + IntSet.fromList [ unTxKey key | key <- requestedKeys ] + availableExtraCandidates = + [ key + | key <- remainingKeys + , not (IntSet.member (unTxKey key) requestedSet) + ] + availableExtraKeys <- sublistOf availableExtraCandidates + let availableKeys = requestedKeys <> availableExtraKeys + peerAvailableTxIds <- + IntMap.fromList <$> mapM genAvailableTx availableKeys -deriving via OnlyCheckWhnfNamed "StdGen" StdGen instance NoThunks StdGen + let requestedKeysOrdered = + [ key + | key <- toList peerUnacknowledgedTxIds + , IntSet.member (unTxKey key) requestedSet + ] + (peerRequestedTxBatches, peerRequestedTxsSize) <- + genRequestedTxBatches peerAvailableTxIds requestedKeysOrdered + + peerDownloadedTxs <- + IntMap.fromList <$> mapM genDownloadedTx downloadedKeys + + pure PeerTxLocalState { + peerUnacknowledgedTxIds, + peerAvailableTxIds, + peerRequestedTxs = requestedSet, + peerRequestedTxBatches, + peerRequestedTxsSize, + peerRequestedTxIds, + peerDownloadedTxs + } + where + genAvailableTx key = do + txSize <- genPositiveSize + pure (unTxKey key, txSize) --- | Verify that `SharedTxState` returned by `collectTxsImpl` if evaluated to --- WHNF, it doesn't contain any thunks. --- -prop_collectTxsImpl_nothunks - :: ArbCollectTxs - -> Property -prop_collectTxsImpl_nothunks (ArbCollectTxs _mempoolHasTxFun txidsRequested txsReceived peeraddr _ st) = - case TXS.collectTxsImpl getTxSize peeraddr txidsRequested txsReceived st of - Right st' -> case unsafeNoThunks $! 
st' of - Nothing -> property True - Just ctx -> counterexample (show ctx) False - Left _ -> property True + genDownloadedTx key = do + txSize <- genPositiveSize + pure (unTxKey key, mkTx (txIdForKey key) txSize) +data PeerSeed = PeerSeed { + peerSeedScore :: !PeerScore + , peerSeedGeneration :: !Word64 + } -newtype ArbTxDecisionPolicy = ArbTxDecisionPolicy TxDecisionPolicy - deriving Show +data PeerDerivedUsage = PeerDerivedUsage { + peerHasSubmitting :: !Bool + , peerRequestedBatches :: !Int + , peerRequestedTxsBytes :: !SizeInBytes + } -instance Arbitrary ArbTxDecisionPolicy where - arbitrary = - ArbTxDecisionPolicy <$> ( - TxDecisionPolicy . getSmall . getPositive - <$> arbitrary - <*> (getSmall . getPositive <$> arbitrary) - <*> (SizeInBytes . getPositive <$> arbitrary) - <*> (getSmall . getPositive <$> arbitrary) - <*> (realToFrac <$> choose (0 :: Double, 2)) - <*> choose (0, 1) - <*> choose (0, 1800)) +-- Generate a shared tx state with distinct active and retained entries. +genSharedTxState :: Gen (SharedTxState PeerAddr TxId) +genSharedTxState = sized $ \n -> do + let maxPeers = min 6 (n + 1) + maxActiveTxs = min 8 (n + 2) + maxRetainedTxs = min 6 (n + 2) - shrink (ArbTxDecisionPolicy a@TxDecisionPolicy { - maxNumTxIdsToRequest, - txsSizeInflightPerPeer, - txInflightMultiplicity }) = - [ ArbTxDecisionPolicy a { maxNumTxIdsToRequest = NumTxIdsToReq x } - | (Positive (Small x)) <- shrink (Positive (Small (getNumTxIdsToReq maxNumTxIdsToRequest))) - ] - ++ - [ ArbTxDecisionPolicy a { txsSizeInflightPerPeer = SizeInBytes s } - | Positive s <- shrink (Positive (getSizeInBytes txsSizeInflightPerPeer)) - ] - ++ - [ ArbTxDecisionPolicy a { txInflightMultiplicity = x } - | Positive (Small x) <- shrink (Positive (Small txInflightMultiplicity)) - ] + numPeers <- chooseInt (0, maxPeers) + peeraddrs <- genDistinctPositiveInts numPeers + peerSeeds <- Map.fromList <$> mapM genPeerSeedEntry peeraddrs + numActiveTxs <- + if null peeraddrs + then pure 0 + else chooseInt (0, 
maxActiveTxs) + numRetainedTxs <- chooseInt (0, maxRetainedTxs) -prop_splitAcknowledgedTxIds - :: ArbDecisionContexts TxId - -> Property -prop_splitAcknowledgedTxIds - ArbDecisionContexts { - arbDecisionPolicy = policy@TxDecisionPolicy { maxNumTxIdsToRequest, - maxUnacknowledgedTxIds }, - arbSharedState = st - } - = - counterexample "ackedTxIds <> unackedTxIds ≠ unacknowledgedTxIds ps" - (ackedTxIds <> unackedTxIds === unacknowledgedTxIds ps) - .&&. counterexample "unackedAndRequested ≰ maxUnacknowledgedTxIds" - (unackedAndRequested <= maxUnacknowledgedTxIds) - .&&. counterexample "requestedTxIdsInflight ps ≰ maxNumTxIdsToRequest" - (requestedTxIdsInflight ps <= maxNumTxIdsToRequest) - .&&. counterexample "numTxIdsToReq ≰ maxNumTxIdsToRequest - requestedTxIdsInflight ps" - (numTxIdsToReq <= maxNumTxIdsToRequest - requestedTxIdsInflight ps) - .&&. counterexample "numTxIdsToReq ≰ maxUnacknowledgedTxIds - unackedAndRequested + fromIntegral numOfAckedTxIds" - (numTxIdsToReq <= maxUnacknowledgedTxIds - unackedAndRequested + fromIntegral numOfAckedTxIds) - where - ps = case Map.elems $ TXS.peerTxStates st of - a : _ -> a - [] -> error "generator invariant violation: empty peerTxStates map" - (numTxIdsToReq, ackedTxIds, unackedTxIds) - = TXS.splitAcknowledgedTxIds policy st ps - numOfAckedTxIds = StrictSeq.length ackedTxIds - numOfUnackedTxIds = StrictSeq.length unackedTxIds - unackedAndRequested = fromIntegral numOfUnackedTxIds + requestedTxIdsInflight ps + txids <- genDistinctPositiveInts (numActiveTxs + numRetainedTxs) + let (activeTxIds, retainedTxIds) = splitAt numActiveTxs txids + activeEntries <- mapM (genActiveTxEntry peeraddrs) activeTxIds + retainedEntries <- mapM genRetainedEntry retainedTxIds + sharedGeneration <- genSmallWord64 -data ArbDecisionContexts txid = ArbDecisionContexts { - arbDecisionPolicy :: TxDecisionPolicy, + pure $ buildSharedTxState peerSeeds activeEntries retainedEntries sharedGeneration + where + genPeerSeedEntry peeraddr = do + 
peerScoreValue <- choose (0 :: Double, 100) + peerScoreTs <- genSmallTime + peerSeedGeneration <- genSmallWord64 + pure ( peeraddr + , PeerSeed { + peerSeedScore = PeerScore peerScoreValue peerScoreTs, + peerSeedGeneration + } + ) - arbSharedState :: SharedTxState PeerAddr txid (Tx txid), + genRetainedEntry txid = do + retainUntil <- genSharedExpiryTime + pure (txid, retainUntil) - arbMempoolHasTx :: Fun txid Bool - -- ^ needed just for shrinking - } +-- Generate one active tx entry using a mix of leased and claimable shapes. +genActiveTxEntry :: [PeerAddr] -> TxId -> Gen (TxId, TxEntry PeerAddr) +genActiveTxEntry peeraddrs txid = do + txEntry <- frequency + [ (5, genLeasedTxEntry peeraddrs txid) + , (3, genClaimableTxEntry peeraddrs txid) + ] + pure (txid, txEntry) + +-- Generate a leased entry where the owner may already be downloading, buffered, or submitting. +genLeasedTxEntry :: [PeerAddr] -> TxId -> Gen (TxEntry PeerAddr) +genLeasedTxEntry peeraddrs txid = do + advertiserPeers <- genNonEmptySublist peeraddrs + owner <- elements advertiserPeers + txAdvertisers <- genOwnedAdvertisers advertiserPeers owner + txLease <- TxLeased owner <$> genSharedExpiryTime + ownerAttempt <- frequency + [ (2, pure Nothing) + , (2, Just <$> elements [TxDownloading, TxBuffered]) + , (1, pure (Just TxSubmitting)) + ] + pure TxEntry { + txLease, + txAdvertisers, + txTieBreakSalt = txid, + txAttempts = maybe Map.empty (Map.singleton owner) ownerAttempt + } -instance Show txid => Show (ArbDecisionContexts txid) where - show ArbDecisionContexts { - arbDecisionPolicy, - arbSharedState = st, - arbMempoolHasTx +-- Generate a claimable entry advertised by one or more resolved peers. 
+genClaimableTxEntry :: [PeerAddr] -> TxId -> Gen (TxEntry PeerAddr) +genClaimableTxEntry peeraddrs txid = do + advertiserPeers <- genNonEmptySublist peeraddrs + txAdvertisers <- genResolvedAdvertisers advertiserPeers + pure TxEntry { + txLease = TxClaimable, + txAdvertisers, + txTieBreakSalt = txid, + txAttempts = Map.empty } - = - intercalate "\n\t" - [ "ArbDecisionContext" - , show arbDecisionPolicy - , show st - , show arbMempoolHasTx - ] --- | Fix-up `PeerTxState` according to `TxDecisionPolicy`. --- -fixupPeerTxStateWithPolicy :: Ord txid - => TxDecisionPolicy - -> PeerTxState txid tx - -> PeerTxState txid tx -fixupPeerTxStateWithPolicy - TxDecisionPolicy { maxUnacknowledgedTxIds, - maxNumTxIdsToRequest } - ps@PeerTxState { unacknowledgedTxIds, - availableTxIds, - requestedTxsInflight, - requestedTxIdsInflight, - unknownTxs - } - = - ps { unacknowledgedTxIds = unacknowledgedTxIds', - availableTxIds = availableTxIds', - requestedTxsInflight = requestedTxsInflight', - requestedTxIdsInflight = requestedTxIdsInflight', - unknownTxs = unknownTxs' - } +-- Generate advertisers where only the chosen owner uses AckWhenBuffered. +genOwnedAdvertisers + :: [PeerAddr] + -> PeerAddr + -> Gen (Map.Map PeerAddr TxAdvertiser) +genOwnedAdvertisers advertiserPeers owner = + Map.fromList <$> mapM genAdvertiser advertiserPeers where - -- limit the number of unacknowledged txids, and then fix-up all the other - -- sets. - unacknowledgedTxIds' = StrictSeq.take (fromIntegral maxUnacknowledgedTxIds) - unacknowledgedTxIds - unackedSet = Set.fromList (toList unacknowledgedTxIds') - availableTxIds' = availableTxIds `Map.restrictKeys` unackedSet - requestedTxsInflight' = requestedTxsInflight `Set.intersection` unackedSet - -- requestedTxIdsInflight must be smaller than `maxNumTxIdsToRequest, and - -- also `requestedTxIdsInflight` and the number of `unacknowledgedTxIds'` - -- must be smaller or equal to `maxUnacknowledgedTxIds`. 
- requestedTxIdsInflight' = requestedTxIdsInflight - `min` maxNumTxIdsToRequest - `min` (maxUnacknowledgedTxIds - fromIntegral (StrictSeq.length unacknowledgedTxIds')) - unknownTxs' = unknownTxs `Set.intersection` unackedSet - - - --- | Fix-up `SharedTxState` so it satisfies `TxDecisionPolicy`. --- -fixupSharedTxStateForPolicy - :: forall peeraddr txid tx. - Ord txid - => (txid -> Bool) -- ^ mempoolHasTx - -> TxDecisionPolicy - -> SharedTxState peeraddr txid tx - -> SharedTxState peeraddr txid tx -fixupSharedTxStateForPolicy - mempoolHasTx - policy@TxDecisionPolicy { - txsSizeInflightPerPeer, - txInflightMultiplicity + genAdvertiser peeraddr = do + txAdvertisedSize <- genPositiveSize + let txAckState + | peeraddr == owner = AckWhenBuffered + | otherwise = AckWhenResolved + pure (peeraddr, TxAdvertiser { txAckState, txAdvertisedSize }) + +-- Generate advertisers that all acknowledge on resolution. +genResolvedAdvertisers :: [PeerAddr] -> Gen (Map.Map PeerAddr TxAdvertiser) +genResolvedAdvertisers advertiserPeers = + Map.fromList <$> mapM genAdvertiser advertiserPeers + where + genAdvertiser peeraddr = do + txAdvertisedSize <- genPositiveSize + pure (peeraddr, TxAdvertiser { txAckState = AckWhenResolved, txAdvertisedSize }) + +-- Rebuild a shared state from tx-centric fixtures while preserving interned keys. 
+buildSharedTxState + :: Map.Map PeerAddr PeerSeed + -> [(TxId, TxEntry PeerAddr)] + -> [(TxId, Time)] + -> Word64 + -> SharedTxState PeerAddr TxId +buildSharedTxState peerSeeds activeEntries retainedEntries sharedGeneration = + baseState { + sharedPeers = deriveSharedPeers peerSeeds activeEntries, + sharedTxTable = + IntMap.fromList + [ (unTxKey (lookupKeyOrFail txid baseState), txEntry) + | (txid, txEntry) <- activeEntries + ], + sharedRetainedTxs = + retainedFromList + [ (unTxKey (lookupKeyOrFail txid baseState), retainUntil) + | (txid, retainUntil) <- retainedEntries + ], + sharedGeneration } - st@SharedTxState { peerTxStates } - = - fixupSharedTxState - mempoolHasTx - st { peerTxStates = snd . mapAccumR fn (0, Map.empty) $ peerTxStates } where - -- fixup `PeerTxState` and accumulate size of all `tx`'s in-flight across - -- all peers. - fn :: (SizeInBytes, Map txid Int) - -> PeerTxState txid tx - -> ((SizeInBytes, Map txid Int), PeerTxState txid tx) - fn - (sizeInflightAll, inflightMap) - ps - = - ( ( sizeInflightAll + requestedTxsInflightSize' - , inflightMap' - ) - , ps' { requestedTxsInflight = requestedTxsInflight', - requestedTxsInflightSize = requestedTxsInflightSize' - } - ) - where - ps' = fixupPeerTxStateWithPolicy policy ps - - (requestedTxsInflightSize', requestedTxsInflight', inflightMap') = - Map.foldrWithKey - (\txid txSize r@(!inflightSize, !inflightSet, !inflight) -> - let (multiplicity, inflight') = - Map.alterF - (\case - Nothing -> (1, Just 1) - Just x -> let x' = x + 1 in (x', Just $! 
x')) - txid inflight - in if inflightSize <= txsSizeInflightPerPeer - && multiplicity <= txInflightMultiplicity - then (txSize + inflightSize, Set.insert txid inflightSet, inflight') - else r - ) - (0, Set.empty, inflightMap) - (availableTxIds ps' `Map.restrictKeys` requestedTxsInflight ps') - -instance (Arbitrary txid, Ord txid, Function txid, CoArbitrary txid) - => Arbitrary (ArbDecisionContexts txid) where - - arbitrary = do - ArbTxDecisionPolicy policy <- arbitrary - (mempoolHasTx, _ps, st, _) <- - genSharedTxState (fromIntegral $ maxNumTxIdsToRequest policy) - let st' = fixupSharedTxStateForPolicy - (apply mempoolHasTx) policy st - - return $ ArbDecisionContexts { - arbDecisionPolicy = policy, - arbMempoolHasTx = mempoolHasTx, - arbSharedState = st' + baseState = mkSharedState (fmap fst activeEntries <> fmap fst retainedEntries) + +-- Derive peer phases and inflight counters from the generated tx entries. +deriveSharedPeers + :: Map.Map PeerAddr PeerSeed + -> [(TxId, TxEntry PeerAddr)] + -> Map.Map PeerAddr SharedPeerState +deriveSharedPeers peerSeeds activeEntries = + Map.mapWithKey buildPeerState completePeerSeeds + where + completePeerSeeds = + foldl' addMissingPeerSeed peerSeeds (concatMap (entryPeers . 
snd) activeEntries) + + peerUsages = + foldl' accumulatePeerUsage Map.empty activeEntries + + addMissingPeerSeed acc peeraddr = + Map.insertWith (\_ old -> old) peeraddr defaultPeerSeed acc + + buildPeerState peeraddr PeerSeed { peerSeedScore, peerSeedGeneration } = + let PeerDerivedUsage { + peerHasSubmitting, + peerRequestedBatches, + peerRequestedTxsBytes + } = Map.findWithDefault emptyPeerDerivedUsage peeraddr peerUsages + sharedPeerPhase + | peerHasSubmitting = PeerSubmittingToMempool + | peerRequestedBatches > 0 = PeerWaitingTxs + | otherwise = PeerIdle in + SharedPeerState { + sharedPeerPhase, + sharedPeerScore = peerSeedScore, + sharedPeerGeneration = peerSeedGeneration, + sharedPeerRequestedTxBatches = peerRequestedBatches, + sharedPeerRequestedTxsSize = peerRequestedTxsBytes } - shrink a@ArbDecisionContexts { - arbDecisionPolicy = policy, - arbMempoolHasTx = mempoolHasTx, - arbSharedState = sharedState - } = - -- shrink shared state - [ a { arbSharedState = sharedState'' } - | sharedState' <- shrinkSharedTxState (apply mempoolHasTx) sharedState - , let sharedState'' = fixupSharedTxStateForPolicy - (apply mempoolHasTx) policy sharedState' - , sharedState'' /= sharedState - ] + defaultPeerSeed = + PeerSeed { + peerSeedScore = emptyPeerScore now, + peerSeedGeneration = 0 + } +-- Default derived usage for a peer with no active work. +emptyPeerDerivedUsage :: PeerDerivedUsage +emptyPeerDerivedUsage = + PeerDerivedUsage { + peerHasSubmitting = False, + peerRequestedBatches = 0, + peerRequestedTxsBytes = 0 + } --- | Construct decision context in a deterministic way. For micro benchmarks. --- --- It is based on QuickCheck's `arbitrary` instance for `ArbDecisionContexts. 
--- -mkDecisionContext :: SMGen - -- ^ pseudo random generator - -> Int - -- ^ size - -> (TxDecisionPolicy, SharedTxState PeerAddr TxId (Tx TxId)) -mkDecisionContext stdgen size = - case unGen gen (QCGen stdgen) size of - ArbDecisionContexts { arbDecisionPolicy = policy, - arbSharedState = sharedState - } -> (policy, sharedState) +-- Fold one tx entry's attempts into the derived per-peer usage map. +accumulatePeerUsage + :: Map.Map PeerAddr PeerDerivedUsage + -> (TxId, TxEntry PeerAddr) + -> Map.Map PeerAddr PeerDerivedUsage +accumulatePeerUsage acc (_, TxEntry { txAdvertisers, txAttempts }) = + foldl' step acc (Map.toList txAttempts) where - gen :: Gen (ArbDecisionContexts TxId) - gen = arbitrary - + step acc' (peeraddr, attempt) = + case attempt of + TxDownloading -> + updatePeerUsage peeraddr False 1 (advertisedSize peeraddr) acc' + TxSubmitting -> + updatePeerUsage peeraddr True 0 0 acc' + TxBuffered -> + updatePeerUsage peeraddr False 0 0 acc' + TxNoAttempt -> + acc' + + advertisedSize peeraddr = maybe 0 txAdvertisedSize (Map.lookup peeraddr txAdvertisers) + +-- Merge one peer's submitting and inflight usage into the accumulator. 
+updatePeerUsage + :: PeerAddr + -> Bool + -> Int + -> SizeInBytes + -> Map.Map PeerAddr PeerDerivedUsage + -> Map.Map PeerAddr PeerDerivedUsage +updatePeerUsage peeraddr submitting requestedBatches requestedTxsBytes acc = + Map.insert peeraddr usage' acc + where + usage = + Map.findWithDefault emptyPeerDerivedUsage peeraddr acc + usage' = + usage { + peerHasSubmitting = peerHasSubmitting usage || submitting, + peerRequestedBatches = peerRequestedBatches usage + requestedBatches, + peerRequestedTxsBytes = peerRequestedTxsBytes usage + requestedTxsBytes + } -prop_ArbDecisionContexts_generator - :: ArbDecisionContexts TxId - -> Property -prop_ArbDecisionContexts_generator - ArbDecisionContexts { arbSharedState = st } - = - -- whenFail (pPrint a) $ - sharedTxStateInvariant StrongInvariant st - - -prop_ArbDecisionContexts_shrinker - :: ArbDecisionContexts TxId - -> Every -prop_ArbDecisionContexts_shrinker - ctx - = - foldMap (\a -> - Every - . counterexample (show a) - . sharedTxStateInvariant StrongInvariant - . arbSharedState - $ a) - $ shrink ctx - - --- | Verify that `makeDecisions` preserves the `SharedTxState` invariant. --- -prop_makeDecisions_sharedstate - :: ArbDecisionContexts TxId - -> Property -prop_makeDecisions_sharedstate - ArbDecisionContexts { arbDecisionPolicy = policy, - arbSharedState = sharedTxState } = - let (sharedState, decisions) = TXS.makeDecisions policy sharedTxState (peerTxStates sharedTxState) - in counterexample (show sharedState) - $ counterexample (show decisions) - $ sharedTxStateInvariant StrongInvariant sharedState +-- Collect every peer mentioned by a tx entry. +entryPeers :: TxEntry PeerAddr -> [PeerAddr] +entryPeers TxEntry { txLease, txAdvertisers, txAttempts } = + leaseOwner <> Map.keys txAdvertisers <> Map.keys txAttempts + where + leaseOwner = + case txLease of + TxLeased owner _ -> [owner] + TxClaimable -> [] + +-- Shrink shared state by dropping active txs, retained txs, or unused peers. 
+shrinkSharedTxState + :: SharedTxState PeerAddr TxId + -> [SharedTxState PeerAddr TxId] +shrinkSharedTxState sharedState = + nub $ + [ emptySharedTxState + , buildSharedTxState peerSeeds [] retainedEntries 0 + , buildSharedTxState peerSeeds activeEntries [] 0 + , buildSharedTxState usedPeerSeeds activeEntries retainedEntries 0 + ] ++ + [ buildSharedTxState peerSeeds activeEntries' retainedEntries 0 + | activeEntries' <- smallerActiveEntries + ] ++ + [ buildSharedTxState peerSeeds activeEntries retainedEntries' 0 + | retainedEntries' <- smallerRetainedEntries + ] + where + activeEntries = + [ (resolveTxKey sharedState (TxKey k), txEntry) + | (k, txEntry) <- IntMap.toList (sharedTxTable sharedState) + ] + retainedEntries = + [ (resolveTxKey sharedState (TxKey k), retainUntil) + | (k, retainUntil) <- retainedToList (sharedRetainedTxs sharedState) + ] + peerSeeds = + Map.map + (\SharedPeerState { sharedPeerScore, sharedPeerGeneration } -> + PeerSeed { + peerSeedScore = sharedPeerScore, + peerSeedGeneration = sharedPeerGeneration + }) + (sharedPeers sharedState) + usedPeers = + foldl' (\peers (_, txEntry) -> peers <> entryPeers txEntry) [] activeEntries + usedPeerSeeds = + Map.filterWithKey (\peeraddr _ -> peeraddr `elem` usedPeers) peerSeeds + smallerActiveEntries = + take 6 + [ activeEntries' + | activeEntries' <- shrinkList (const []) activeEntries + , length activeEntries' < length activeEntries + ] + smallerRetainedEntries = + take 6 + [ retainedEntries' + | retainedEntries' <- shrinkList (const []) retainedEntries + , length retainedEntries' < length retainedEntries + ] +-- Partition requested keys into a small number of contiguous request batches. 
+genRequestedTxBatches + :: IntMap.IntMap SizeInBytes + -> [TxKey] + -> Gen (StrictSeq.StrictSeq RequestedTxBatch, SizeInBytes) +genRequestedTxBatches _ [] = + pure (StrictSeq.empty, 0) +genRequestedTxBatches peerAvailableTxIds requestedKeys = do + batchCount <- chooseInt (1, min 3 (length requestedKeys)) + batchLengths <- genPositivePartition (length requestedKeys) batchCount + let batches = + [ mkRequestedTxBatch keys (sum [ lookupAvailableTxSize key | key <- keys ]) + | keys <- splitByLengths batchLengths requestedKeys + ] + peerRequestedTxsSize = + sum [ requestedTxBatchSize batch | batch <- batches ] + pure (StrictSeq.fromList batches, peerRequestedTxsSize) + where + lookupAvailableTxSize key = + case IntMap.lookup (unTxKey key) peerAvailableTxIds of + Just txSize -> txSize + Nothing -> error "TxLogic.genRequestedTxBatches: missing requested tx size" + +-- Split a positive total into a fixed number of positive parts. +genPositivePartition :: Int -> Int -> Gen [Int] +genPositivePartition totalCount 1 = + pure [totalCount] +genPositivePartition totalCount parts = do + n <- chooseInt (1, totalCount - parts + 1) + (n :) <$> genPositivePartition (totalCount - n) (parts - 1) + +-- Split a list according to a list of chunk lengths. +splitByLengths :: [Int] -> [a] -> [[a]] +splitByLengths [] [] = [] +splitByLengths [] _ = [] +splitByLengths (n : ns) xs = + let (prefix, suffix) = splitAt n xs in + prefix : splitByLengths ns suffix + +-- Pick a sublist, falling back to a singleton when the input is non-empty. +genNonEmptySublist :: [a] -> Gen [a] +genNonEmptySublist [] = + pure [] +genNonEmptySublist xs = do + ys <- sublistOf xs + case ys of + [] -> (: []) <$> elements xs + _ -> pure ys + +-- Generate distinct positive ints from a bounded shuffled range. +genDistinctPositiveInts :: Int -> Gen [Int] +genDistinctPositiveInts count + | count <= 0 = pure [] + | otherwise = take count <$> shuffle [1 .. 
max count (count * 4 + 5)] + +-- Generate expiry times near the shared test reference time. +genSharedExpiryTime :: Gen Time +genSharedExpiryTime = + Time . fromIntegral <$> chooseInt (80, 120) + +-- Generate a positive tx size +genPositiveSize :: Gen SizeInBytes +genPositiveSize = + fromIntegral <$> chooseInt (1, fromIntegral $ getSizeInBytes max_TX_SIZE) + +-- Generate a small test timestamp. +genSmallTime :: Gen Time +genSmallTime = + Time . fromIntegral <$> chooseInt (0, 1000) + +-- Generate a small test generation counter. +genSmallWord64 :: Gen Word64 +genSmallWord64 = + fromIntegral <$> chooseInt (0, 1000) + +-- Recover the fixture txid associated with an interned key. +txIdForKey :: TxKey -> TxId +txIdForKey (TxKey key) = key + 1 + +-- Fixed reference time used by deterministic test fixtures. +now :: Time +now = Time 100 + +-- Convert a positive QuickCheck value into a bounded non-zero tx size. +mkSize :: Positive Int -> SizeInBytes +mkSize (Positive n) = fromIntegral ((n `mod` 65535) + 1) + +-- Build a valid test transaction with matching body and advertised size. +mkTx :: TxId -> SizeInBytes -> Tx TxId +mkTx txid txSize = Tx + { getTxId = txid + , getTxSize = txSize + , getTxAdvSize = txSize + , getTxValid = True + , getTxParent = Nothing + } --- | Verify that `makeDecisions`: --- --- * modifies `inflightTxs` map by adding `tx`s which are inflight; --- * updates `requestedTxsInflightSize` correctly; --- * in-flight `tx`s set is disjoint with `bufferedTxs`; --- * requested `tx`s are coming from `availableTxIds`. --- -prop_makeDecisions_inflight - :: ArbDecisionContexts TxId - -> Property -prop_makeDecisions_inflight - ArbDecisionContexts { - arbDecisionPolicy = policy, - arbSharedState = sharedTxState - } - = - let (sharedState', decisions) = TXS.makeDecisions policy sharedTxState (peerTxStates sharedTxState) - - inflightSet :: Set TxId - inflightSet = foldMap (Map.keysSet . 
txdTxsToRequest) decisions - - inflightSize :: Map PeerAddr SizeInBytes - inflightSize = Map.foldrWithKey - (\peer TxDecision { txdTxsToRequest } m -> - Map.insert peer - (foldMap (\txid -> fromMaybe 0 $ Map.lookup peer (peerTxStates sharedTxState) - >>= Map.lookup txid . availableTxIds) - (Map.keysSet txdTxsToRequest)) - m - ) Map.empty decisions - - bufferedSet :: Set TxId - bufferedSet = Map.keysSet (bufferedTxs sharedTxState) - in - counterexample (show sharedState') $ - counterexample (show decisions) $ - - -- 'inflightTxs' set is increased by exactly the requested txs - counterexample (concat - [ show inflightSet - , " not a subset of " - , show (inflightTxs sharedState') - ]) - ( inflightSet <> Map.keysSet (inflightTxs sharedState') - === - Map.keysSet (inflightTxs sharedState') - ) - - .&&. - - -- for each peer size in flight is equal to the original size in flight - -- plus size of all requested txs - property - (fold - (Map.merge - (Map.mapMaybeMissing - (\peer a -> - Just ( Every - . counterexample - ("missing peer in requestedTxsInflightSize: " ++ show peer) - $ (a === 0)))) - (Map.mapMaybeMissing (\_ _ -> Nothing)) - (Map.zipWithMaybeMatched - (\peer delta PeerTxState { requestedTxsInflightSize } -> - let original = - case Map.lookup peer (peerTxStates sharedTxState) of - Nothing -> 0 - Just PeerTxState { requestedTxsInflightSize = a } -> a - in Just ( Every - . counterexample (show peer) - $ original + delta - === - requestedTxsInflightSize - ) - )) - inflightSize - (peerTxStates sharedState'))) - - .&&. counterexample ("requested txs must not be buffered: " - ++ show (inflightSet `Set.intersection` bufferedSet)) - (inflightSet `Set.disjoint` bufferedSet) - - .&&. counterexample "requested txs must be available" - ( fold $ - Map.merge - (Map.mapMissing (\peeraddr _ -> - Every $ - counterexample ("peer missing in peerTxStates " ++ show peeraddr) - False)) - (Map.mapMissing (\_ _ -> Every True)) - (Map.zipWithMatched (\peeraddr a b -> Every - . 
counterexample (show peeraddr) - $ a `Set.isSubsetOf` b)) - -- map of requested txs - (Map.fromList [ (peeraddr, Map.keysSet txids) - | (peeraddr, TxDecision { txdTxsToRequest = txids }) - <- Map.assocs decisions - ]) - -- map of available txs - (Map.map (Map.keysSet . availableTxIds) - (peerTxStates sharedTxState))) - - --- | Verify that `makeTxDecisions` obeys `TxDecisionPolicy`. --- -prop_makeDecisions_policy - :: ArbDecisionContexts TxId - -> Property -prop_makeDecisions_policy - ArbDecisionContexts { - arbDecisionPolicy = policy@TxDecisionPolicy { txsSizeInflightPerPeer, - txInflightMultiplicity }, - arbSharedState = sharedTxState - } = - let (sharedState', _decisions) = TXS.makeDecisions policy sharedTxState (peerTxStates sharedTxState) - txsSizeInflightPerPeerEff = txsSizeInflightPerPeer + maxTxSize - in - -- size in flight for each peer cannot exceed `txsSizeInflightPerPeer` - counterexample "size in flight per peer vaiolation" ( - foldMap - (\PeerTxState { availableTxIds, requestedTxsInflight } -> - let inflight = fold (availableTxIds `Map.restrictKeys` requestedTxsInflight) - in Every $ counterexample (show (inflight, txsSizeInflightPerPeerEff)) $ - inflight - <= - txsSizeInflightPerPeerEff - ) - (peerTxStates sharedState') - ) - - .&&. - ( - -- none of the multiplicities should go above the - -- `txInflightMultiplicity` - let inflight = inflightTxs sharedState' - in - counterexample ("multiplicities violation: " ++ show inflight) - . foldMap (Every . (<= txInflightMultiplicity)) - $ inflight - ) - - --- | Verify that `makeDecisions` and `acknowledgeTxIds` are compatible. 
--- -prop_makeDecisions_acknowledged - :: ArbDecisionContexts TxId - -> Property -prop_makeDecisions_acknowledged - ArbDecisionContexts { arbDecisionPolicy = policy, - arbSharedState = sharedTxState - } = - whenFail (pPrintOpt CheckColorTty defaultOutputOptionsDarkBg { outputOptionsCompact = True } sharedTxState) $ - let (_, decisions) = TXS.makeDecisions policy sharedTxState (peerTxStates sharedTxState) - - ackFromDecisions :: Map PeerAddr NumTxIdsToAck - ackFromDecisions = Map.fromList - [ (peer, txdTxIdsToAcknowledge) - | (peer, TxDecision { txdTxIdsToAcknowledge }) - <- Map.assocs decisions - ] - - ackFromState :: Map PeerAddr NumTxIdsToAck - ackFromState = - Map.map (\ps -> case TXS.acknowledgeTxIds policy sharedTxState ps of - (a, _, _, _, _) -> a) - . peerTxStates - $ sharedTxState - - in counterexample (show (ackFromDecisions, ackFromState)) - . fold - $ Map.merge - -- it is an error if `ackFromDecisions` contains a result which is - -- missing in `ackFromState` - (Map.mapMissing (\addr num -> Every $ counterexample ("missing " ++ show (addr, num)) False)) - -- if `ackFromState` contains an enty which is missing in - -- `ackFromDecisions` it must be `0`; `makeDecisions` might want to - -- download some `tx`s even if there's nothing to acknowledge - (Map.mapMissing (\_ d -> Every (d === 0))) - -- if both entries exists they must be equal - (Map.zipWithMatched (\_ a b -> Every (a === b))) - ackFromDecisions - ackFromState - - --- | `makeDecision` is exhaustive in the sense that it returns an empty --- decision list on a state returned by a prior call of `makeDecision`. --- -prop_makeDecisions_exhaustive - :: ArbDecisionContexts TxId - -> Property -prop_makeDecisions_exhaustive - ArbDecisionContexts { - arbDecisionPolicy = policy, - arbSharedState = sharedTxState +-- Construct a peer fixture with zeroed generation and inflight counters. 
+mkSharedPeerState :: PeerPhase -> PeerScore -> SharedPeerState +mkSharedPeerState sharedPeerPhase sharedPeerScore = + SharedPeerState { + sharedPeerPhase, + sharedPeerScore, + sharedPeerGeneration = 0, + sharedPeerRequestedTxBatches = 0, + sharedPeerRequestedTxsSize = 0 } - = - let (sharedTxState', decisions') - = TXS.makeDecisions policy - sharedTxState - (peerTxStates sharedTxState) - (sharedTxState'', decisions'') - = TXS.makeDecisions policy - sharedTxState' - (peerTxStates sharedTxState') - in counterexample ("decisions': " ++ show decisions') - . counterexample ("state': " ++ show sharedTxState') - . counterexample ("decisions'': " ++ show decisions'') - . counterexample ("state'': " ++ show sharedTxState'') - $ null decisions'' - - -data ArbDecisionContextWithReceivedTxIds = ArbDecisionContextWithReceivedTxIds { - adcrDecisionPolicy :: TxDecisionPolicy, - adcrSharedState :: SharedTxState PeerAddr TxId (Tx TxId), - adcrPeerTxState :: PeerTxState TxId (Tx TxId), - adcrMempoolHasTx :: Fun TxId Bool, - adcrTxsToAck :: [Tx TxId], - -- txids to acknowledge - adcrPeerAddr :: PeerAddr - -- the peer which owns the acknowledged txids - } - deriving Show - - -instance Arbitrary ArbDecisionContextWithReceivedTxIds where - arbitrary = do - ArbTxDecisionPolicy policy <- arbitrary - ArbReceivedTxIds mempoolHasTx - txIdsToAck - peeraddr - ps - st - <- arbitrary - - let st' = fixupSharedTxStateForPolicy - (apply mempoolHasTx) - policy st - ps' = fixupPeerTxStateWithPolicy policy ps - txIdsToAck' = take (fromIntegral (TXS.requestedTxIdsInflight $ peerTxStates st' Map.! 
peeraddr)) txIdsToAck - - downTxsNum <- choose (0, length txIdsToAck') - let downloadedTxs = Foldable.foldl' pruneTx Map.empty $ take downTxsNum $ Map.toList (bufferedTxs st') - ps'' = ps' { downloadedTxs = downloadedTxs } - - return ArbDecisionContextWithReceivedTxIds { - adcrDecisionPolicy = policy, - adcrSharedState = st', - adcrPeerTxState = ps'', - adcrMempoolHasTx = mempoolHasTx, - adcrTxsToAck = txIdsToAck', - adcrPeerAddr = peeraddr - } - where - pruneTx :: Map TxId tx -> (TxId, Maybe tx) -> Map TxId tx - pruneTx m (_, Nothing) = m - pruneTx m (txid, Just tx) = Map.insert txid tx m - - shrink ArbDecisionContextWithReceivedTxIds { - adcrDecisionPolicy = policy, - adcrSharedState = st, - adcrPeerTxState = ps, - adcrMempoolHasTx = mempoolHasTx, - adcrTxsToAck = txIdsToAck, - adcrPeerAddr = peeraddr - } - = - [ ArbDecisionContextWithReceivedTxIds { - adcrDecisionPolicy = policy', - adcrSharedState = st', - adcrPeerTxState = ps, - adcrMempoolHasTx = mempoolHasTx', - adcrTxsToAck = txIdsToAck', - adcrPeerAddr = peeraddr - } - | ArbDecisionContexts { - arbDecisionPolicy = policy', - arbSharedState = st', - arbMempoolHasTx = mempoolHasTx' - } - <- shrink ArbDecisionContexts { - arbDecisionPolicy = policy, - arbSharedState = st, - arbMempoolHasTx = mempoolHasTx - } - , peeraddr `Map.member` peerTxStates st' - , let txIdsToAck' = take ( fromIntegral - . TXS.requestedTxIdsInflight - $ peerTxStates st' Map.! peeraddr - ) - txIdsToAck - ] +-- Intern a list of txids into an otherwise empty shared state. 
+mkSharedState :: [TxId] -> SharedTxState PeerAddr TxId +mkSharedState txids = snd (internTxIds txids emptySharedTxState) --- | `filterActivePeers` should not change decisions made by `makeDecisions` --- -prop_filterActivePeers_not_limitting_decisions - :: ArbDecisionContexts TxId - -> Property -prop_filterActivePeers_not_limitting_decisions - ArbDecisionContexts { - arbDecisionPolicy = policy, - arbSharedState = st - } - = - counterexample (unlines - ["decisions: " ++ show decisions - ," " ++ show decisionPeers - ,"active decisions: " ++ show decisionsOfActivePeers - ," " ++ show activePeers]) $ - - counterexample ("active peers does not restrict the total number of valid decisions available" - ++ show (decisionsOfActivePeers Map.\\ decisions) - ) - (Map.keysSet decisionsOfActivePeers `Set.isSubsetOf` Map.keysSet decisions) - where - activePeersMap = TXS.filterActivePeers policy st - activePeers = Map.keysSet activePeersMap - (_, decisionsOfActivePeers) - = TXS.makeDecisions policy st activePeersMap +-- Construct a single advertiser entry. +mkAdvertiser :: TxOwnerAckState -> SizeInBytes -> TxAdvertiser +mkAdvertiser txAckState txAdvertisedSize = TxAdvertiser { txAckState, txAdvertisedSize } - (_, decisions) = TXS.makeDecisions policy st (peerTxStates st) - decisionPeers = Map.keysSet decisions +-- Construct an advertiser map that shares one advertised size across peers. +mkAdvertisers :: SizeInBytes -> [(PeerAddr, TxOwnerAckState)] -> Map.Map PeerAddr TxAdvertiser +mkAdvertisers txSize = Map.fromList . fmap (\(peeraddr, txAckState) -> (peeraddr, mkAdvertiser txAckState txSize)) + +-- Construct a requested batch together with its cached key set. +mkRequestedTxBatch :: [TxKey] -> SizeInBytes -> RequestedTxBatch +mkRequestedTxBatch keys requestedTxBatchSize = RequestedTxBatch + { requestedTxBatchSet = IntSet.fromList (map unTxKey keys) + , requestedTxBatchSize + } +-- Construct a leased tx entry owned by one peer. 
+mkTxEntry :: PeerAddr -> SizeInBytes -> Maybe TxAttemptState -> TxEntry PeerAddr +mkTxEntry peeraddr txSize mAttempt = TxEntry + { txLease = TxLeased peeraddr (addTime 10 now) + , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenBuffered txSize) + , txTieBreakSalt = 0 + , txAttempts = maybe Map.empty (Map.singleton peeraddr) mAttempt + } --- TODO: makeDecisions property: all peers which have txid's to ack are --- included, this would catch the other bug, and it's important for the system --- to run well. +-- Look up an interned key and fail fast in test setup code. +lookupKeyOrFail :: TxId -> SharedTxState PeerAddr TxId -> TxKey +lookupKeyOrFail txid st = + case lookupTxKey txid st of + Just txKey -> txKey + Nothing -> error "TxLogic.lookupKeyOrFail: missing tx key" + +-- Look up an active tx entry and fail fast in test setup code. +lookupEntryOrFail :: TxKey -> SharedTxState PeerAddr TxId -> TxEntry PeerAddr +lookupEntryOrFail (TxKey k) st = + case IntMap.lookup k (sharedTxTable st) of + Just txEntry -> txEntry + Nothing -> error "TxLogic.lookupEntryOrFail: missing tx entry" + +-- Look up a shared peer and fail fast in test setup code. +lookupPeerOrFail :: PeerAddr -> SharedTxState PeerAddr TxId -> SharedPeerState +lookupPeerOrFail peeraddr st = + case Map.lookup peeraddr (sharedPeers st) of + Just sharedPeerState -> sharedPeerState + Nothing -> error "TxLogic.lookupPeerOrFail: missing peer" + +-- Drop duplicate txids while keeping the first proposed size. +dedupeBatch :: [(TxId, SizeInBytes)] -> [(TxId, SizeInBytes)] +dedupeBatch = nubBy ((==) `on` fst) + +-- Shift proposed txids forward until the batch is disjoint from the shared intern table. +freshBatchAgainstSharedState :: SharedTxState PeerAddr TxId -> [(TxId, SizeInBytes)] -> [(TxId, SizeInBytes)] +freshBatchAgainstSharedState sharedState = reverse . snd . 
foldl' step (reserved, []) + where + reserved = IntSet.fromList (Map.keys (sharedTxIdToKey sharedState)) --- --- Auxiliary functions --- + step (used, acc) (txid, txSize) = + let freshTxId = firstFreshTxId used txid in + (IntSet.insert freshTxId used, (freshTxId, txSize) : acc) -labelInt :: (Integral a, Eq a, Ord a, Show a) - => a -- ^ upper bound - -> a -- ^ width - -> a -- ^ value - -> String -labelInt _ _ 0 = "[0, 0]" -labelInt bound _ b | b >= bound = "[" ++ show bound ++ ", inf)" -labelInt _ a b = - let l = a * (b `div` a) - u = l + a - in (if l == 0 then "(" else "[") - ++ show l ++ ", " - ++ show u ++ ")" +-- Find the first txid not present in the reserved set. +firstFreshTxId :: IntSet.IntSet -> TxId -> TxId +firstFreshTxId used = go + where + go txid + | IntSet.member txid used = go (txid + 1) + | otherwise = txid diff --git a/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs b/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs index d0465d13579..a50e49b4b0a 100644 --- a/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs +++ b/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs @@ -4,11 +4,9 @@ {-# OPTIONS_GHC -Wno-orphans #-} module Ouroboros.Network.Tracing.TxSubmission () where -import Control.Arrow -import Control.Monad.Class.MonadTime.SI import Data.Aeson +import Data.IntMap.Strict qualified as IntMap import Data.Map.Strict qualified as Map -import Data.Set qualified as Set import Cardano.Logging import Ouroboros.Network.TxSubmission.Inbound.V2.Types @@ -17,65 +15,111 @@ instance (Show txid, Show peeraddr) => LogFormatting (TraceTxLogic peeraddr txid forMachine dtal (TraceSharedTxState label SharedTxState {..}) = mconcat $ [ "kind" .= String "TraceSharedTxState" , "label" .= label - , "inflightTxs" .= (fmap (first show) . Map.toList $ inflightTxs) - , "bufferedTxs" .= (fmap show . Set.toList . 
Map.keysSet $ bufferedTxs) - , "timedTxs" .= (fmap (\(Time t, txids) -> (t, fmap show txids)) . Map.toList $ timedTxs) - , "inSubmissionToMempoolTxs" .= (fmap (first show) . Map.toList $ inSubmissionToMempoolTxs) + , "sharedGeneration" .= sharedGeneration + , "peerCount" .= Map.size sharedPeers + , "activeTxCount" .= IntMap.size sharedTxTable + , "retainedTxCount" .= retainedSize sharedRetainedTxs + , "internedTxCount" .= Map.size sharedTxIdToKey + , "leasedTxCount" .= leasedTxCount + , "claimableTxCount" .= claimableTxCount + , "resolvedTxCount" .= resolvedTxCount + , "downloadingAttemptCount" .= downloadingAttemptCount + , "bufferedAttemptCount" .= bufferedAttemptCount + , "submittingAttemptCount" .= submittingAttemptCount + , "peerPhases" .= peerPhases ] ++ more where + activeEntries = IntMap.elems sharedTxTable + + leasedTxCount = + length [ () | TxEntry { txLease = TxLeased _ _ } <- activeEntries ] + + claimableTxCount = + length [ () | TxEntry { txLease = TxClaimable } <- activeEntries ] + + resolvedTxCount = 0 :: Int + + downloadingAttemptCount = + sum [ length [ () | TxDownloading <- Map.elems txAttempts' ] + | TxEntry { txAttempts = txAttempts' } <- activeEntries + ] + + bufferedAttemptCount = + sum [ length [ () | TxBuffered <- Map.elems txAttempts' ] + | TxEntry { txAttempts = txAttempts' } <- activeEntries + ] + + submittingAttemptCount = + sum [ length [ () | TxSubmitting <- Map.elems txAttempts' ] + | TxEntry { txAttempts = txAttempts' } <- activeEntries + ] + + peerPhases = + Map.toList $ + Map.fromListWith (+) + [ (show sharedPeerPhase', 1 :: Int) + | SharedPeerState { sharedPeerPhase = sharedPeerPhase' } <- Map.elems sharedPeers + ] + + renderTxId txKey = + maybe "" show (IntMap.lookup txKey sharedKeyToTxId) + more = case dtal of DMaximum -> - [ "peerTxStates" .= (fmap (first show) . Map.toList $ inflightTxs) - , "referenceCounts" .= (fmap (first show) . 
Map.toList $ referenceCounts) + [ "sharedPeers" .= [ (show peeraddr, show peerState) + | (peeraddr, peerState) <- Map.toList sharedPeers + ] + , "sharedTxTable" .= [ (renderTxId txKey, show txEntry) + | (txKey, txEntry) <- IntMap.toList sharedTxTable + ] + , "sharedRetainedTxs" .= [ (renderTxId txKey, show retainUntil) + | (txKey, retainUntil) <- retainedToList sharedRetainedTxs + ] + , "internedTxIds" .= fmap show (Map.keys sharedTxIdToKey) ] _otherwise -> [] - forMachine dtal (TraceTxDecisions decisionMap) = - ("kind" .= String "TraceTxDecisions") - <> case dtal of - DMaximum -> "decisions" .= - let g (TxsToMempool txs) = map (show . fst) txs - f TxDecision {..} = - [( fromIntegral txdTxIdsToAcknowledge :: Int, fromIntegral txdTxIdsToRequest :: Int - , map (first show) . Map.toList $ txdTxsToRequest, g txdTxsToMempool)] - in map (\(peer, decision) -> (show peer, f decision)) . Map.toList $ decisionMap - _otherwise -> - let f TxDecision {..} = txdTxIdsToAcknowledge == 0 && txdTxIdsToRequest == 0 && - Map.null txdTxsToRequest - in "decision-count" .= Map.size (Map.filter (not . 
f) decisionMap) - instance MetaTrace (TraceTxLogic peeraddr txid tx) where namespaceFor TraceSharedTxState {} = Namespace [] ["TraceSharedTxState"] - namespaceFor TraceTxDecisions {} = - Namespace [] ["TraceTxDecisions"] severityFor _ _ = Just Debug documentFor (Namespace [] ["TraceSharedTxState"]) = - Just "Internal bookkeeping of tx-submission shared state for determining fetch decisions" + Just "Internal bookkeeping of tx-submission shared state for peer coordination" documentFor _ = Nothing allNamespaces = [ - Namespace [] ["TraceSharedTxState"], - Namespace [] ["TraceTxDecisions"] + Namespace [] ["TraceSharedTxState"] ] instance LogFormatting TxSubmissionCounters where forMachine _dtal TxSubmissionCounters {..} = mconcat [ "kind" .= String "TxSubmissionCounters" - , "numOfOutstandingTxIds" .= numOfOutstandingTxIds - , "numOfBufferedTxs" .= numOfBufferedTxs - , "numOfInSubmissionToMempoolTxs" .= numOfInSubmissionToMempoolTxs - , "numOfTxIdsInflight" .= numOfTxIdsInflight + , "txIdMessagesSent" .= txIdMessagesSent + , "txIdsRequested" .= txIdsRequested + , "txIdRepliesReceived" .= txIdRepliesReceived + , "txIdsReceived" .= txIdsReceived + , "txMessagesSent" .= txMessagesSent + , "txsRequested" .= txsRequested + , "txRepliesReceived" .= txRepliesReceived + , "txsReceived" .= txsReceived + , "txsOmitted" .= txsOmitted + , "lateBodies" .= lateBodies ] asMetrics TxSubmissionCounters {..} = - [ IntM "txSubmission.numOfOutstandingTxIds" (fromIntegral numOfOutstandingTxIds) - , IntM "txSubmission.numOfBufferedTxs" (fromIntegral numOfBufferedTxs) - , IntM "txSubmission.numOfInSubmissionToMempoolTxs" (fromIntegral numOfInSubmissionToMempoolTxs) - , IntM "txSubmission.numOfTxIdsInflight" (fromIntegral numOfTxIdsInflight) + [ IntM "txSubmission.txIdMessagesSent" (fromIntegral txIdMessagesSent) + , IntM "txSubmission.txIdsRequested" (fromIntegral txIdsRequested) + , IntM "txSubmission.txIdRepliesReceived" (fromIntegral txIdRepliesReceived) + , IntM 
"txSubmission.txIdsReceived" (fromIntegral txIdsReceived) + , IntM "txSubmission.txMessagesSent" (fromIntegral txMessagesSent) + , IntM "txSubmission.txsRequested" (fromIntegral txsRequested) + , IntM "txSubmission.txRepliesReceived" (fromIntegral txRepliesReceived) + , IntM "txSubmission.txsReceived" (fromIntegral txsReceived) + , IntM "txSubmission.txsOmitted" (fromIntegral txsOmitted) + , IntM "txSubmission.lateBodies" (fromIntegral lateBodies) ] instance MetaTrace TxSubmissionCounters where @@ -86,10 +130,16 @@ instance MetaTrace TxSubmissionCounters where documentFor _ = Nothing metricsDocFor (Namespace [] ["Counters"]) = - [ ("txSubmission.numOfOutstandingTxIds", "txid's which are not yet downloaded") - , ("txSubmission.numOfBufferedTxs", "tx's which have been recently successfully applied to the mempool") - , ("txSubmission.numOfInSubmissionToMempoolTxs", "number of all tx's which are enqueued to the mempool") - , ("txSubmission.numOfTxIdsInflight", "number of all in-flight txid's") + [ ("txSubmission.txIdMessagesSent", "number of txid request messages sent") + , ("txSubmission.txIdsRequested", "number of txids requested from remote peers") + , ("txSubmission.txIdRepliesReceived", "number of txid reply messages received") + , ("txSubmission.txIdsReceived", "number of txids received in reply batches") + , ("txSubmission.txMessagesSent", "number of tx body request messages sent") + , ("txSubmission.txsRequested", "number of tx bodies requested from remote peers") + , ("txSubmission.txRepliesReceived", "number of tx body reply messages received") + , ("txSubmission.txsReceived", "number of tx bodies received") + , ("txSubmission.txsOmitted", "number of requested tx bodies omitted from replies") + , ("txSubmission.lateBodies", "number of tx bodies received after local resolution") ] metricsDocFor _ = [] diff --git a/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission/Inbound.hs 
b/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission/Inbound.hs index 147412c28e3..c666d3cc7d0 100644 --- a/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission/Inbound.hs +++ b/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission/Inbound.hs @@ -73,11 +73,14 @@ instance (Show txid, Show tx) , "error" .= Text.pack (show e) ] - forMachine _dtal (TraceTxInboundDecision decision) = - mconcat [ - "kind" .= String "TraceTxInboundDecision" - , "decision" .= Text.pack (show decision) + forMachine dtal (TraceTxInboundRequestTxs txids) = + mconcat + [ "kind" .= String "TraceTxInboundRequestTxs" + , "count" .= toJSON (length txids) ] + <> case dtal of + DDetailed -> "txIds" .= Text.pack (show txids) + _otherwise -> mempty asMetrics (TraceTxSubmissionCollected txids) = [CounterM "submissions.submitted" (Just (length txids))] @@ -106,8 +109,8 @@ instance MetaTrace (TraceTxSubmissionInbound txid tx) where Namespace [] ["RejectedFromMempool"] namespaceFor TraceTxInboundError {} = Namespace [] ["Error"] - namespaceFor TraceTxInboundDecision {} = - Namespace [] ["Decision"] + namespaceFor TraceTxInboundRequestTxs {} = + Namespace [] ["RequestTxs"] severityFor (Namespace _ ["Collected"]) _ = Just Debug severityFor (Namespace _ ["Processed"]) _ = Just Debug @@ -117,7 +120,7 @@ instance MetaTrace (TraceTxSubmissionInbound txid tx) where severityFor (Namespace _ ["AddedToMempool"]) _ = Just Debug severityFor (Namespace _ ["RejectedFromMempool"]) _ = Just Debug severityFor (Namespace _ ["Error"]) _ = Just Debug - severityFor (Namespace _ ["Decision"]) _ = Just Debug + severityFor (Namespace _ ["RequestTxs"]) _ = Just Debug severityFor _ _ = Nothing @@ -151,8 +154,8 @@ instance MetaTrace (TraceTxSubmissionInbound txid tx) where "Transactions rejected from mempool and processing time" documentFor (Namespace _ ["Error"]) = Just "Protocol violation causing connection reset" - documentFor (Namespace _ ["Decision"]) = Just - "Decision to advance the 
protocol" + documentFor (Namespace _ ["RequestTxs"]) = Just + "Transactions requested from the remote peer" documentFor _ = Nothing allNamespaces = [ @@ -164,5 +167,5 @@ instance MetaTrace (TraceTxSubmissionInbound txid tx) where , Namespace [] ["AddedToMempool"] , Namespace [] ["RejectedFromMempool"] , Namespace [] ["Error"] - , Namespace [] ["Decision"] + , Namespace [] ["RequestTxs"] ] From bacb8ae343ea160b2554732d87a8436c7985319c Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Thu, 9 Apr 2026 19:52:50 +0200 Subject: [PATCH 05/67] fixup: restrict writing of sharedState Use sharedGeneration to track if the sharedState really changed and only write to the tvar if it changed. This makes common operations like receiving and acking a txid that is already retained something that only affects the peer local state. --- .../TxSubmission/Inbound/V2/Registry.hs | 26 +++-- .../Network/TxSubmission/Inbound/V2/State.hs | 95 +++++++++++-------- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 36 ++++++- 3 files changed, 113 insertions(+), 44 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index 15f9b56bfc9..32920353863 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -278,6 +278,17 @@ awaitSharedChangeImp sharedStateVar peeraddr generation mDelay = expired <- Lazy.readTVar delayVar check (generation' /= generation || expired) +-- | Avoid rewriting the shared TVar when the pure state step made no shared +-- change. Callers use 'sharedGeneration' as the dirty bit for shared state. 
+writeSharedStateIfChanged :: MonadSTM m + => SharedTxStateVar m peeraddr txid + -> Word64 + -> SharedTxState peeraddr txid + -> STM m () +writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' + | sharedGeneration sharedState' == sharedGeneration0 = pure () + | otherwise = writeTVar sharedStateVar sharedState' + -- | Compute the next action for this peer in non-pipelined mode. -- -- Returns the selected 'PeerAction', an updated peer-local state, and applies @@ -294,11 +305,12 @@ runNextPeerActionImp :: ( MonadSTM m -> m (PeerAction, PeerTxLocalState tx) runNextPeerActionImp policy sharedStateVar peeraddr now peerState = atomically $ do sharedState <- readTVar sharedStateVar - let (peerAction, peerState', sharedState') = State.nextPeerAction now policy peeraddr + let sharedGeneration0 = sharedGeneration sharedState + (peerAction, peerState', sharedState') = State.nextPeerAction now policy peeraddr peerState sharedState sharedState'' = updatePeerPhase peeraddr (peerPhaseForActionIdle peerAction) sharedState' sharedState''' = updatePeerRequestedTxs policy peeraddr peerState' sharedState'' - writeTVar sharedStateVar sharedState''' + writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState''' return (peerAction, peerState') -- | Compute the next action for this peer in pipelined mode. 
@@ -317,13 +329,14 @@ runNextPeerActionPipelinedImp :: ( MonadSTM m -> m (PeerAction, PeerTxLocalState tx) runNextPeerActionPipelinedImp policy sharedStateVar peeraddr now peerState = atomically $ do sharedState <- readTVar sharedStateVar - let (peerAction, peerState', sharedState') = State.nextPeerActionPipelined now policy + let sharedGeneration0 = sharedGeneration sharedState + (peerAction, peerState', sharedState') = State.nextPeerActionPipelined now policy peeraddr peerState sharedState sharedState'' = updatePeerPhase peeraddr (peerPhaseForActionPipelined peeraddr peerAction sharedState') sharedState' sharedState''' = updatePeerRequestedTxs policy peeraddr peerState' sharedState'' - writeTVar sharedStateVar sharedState''' + writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState''' return (peerAction, peerState') -- | Process a batch of txids received from this peer. @@ -347,10 +360,11 @@ applyReceivedTxIdsImp policy mempoolGetSnapshot sharedStateVar peeraddr now txId txidsAndSizes peerState = atomically $ do MempoolSnapshot { mempoolHasTx } <- mempoolGetSnapshot sharedState <- readTVar sharedStateVar - let (peerState', sharedState') = State.handleReceivedTxIds mempoolHasTx now policy peeraddr + let sharedGeneration0 = sharedGeneration sharedState + (peerState', sharedState') = State.handleReceivedTxIds mempoolHasTx now policy peeraddr txIdsToReq txidsAndSizes peerState sharedState sharedState'' = updatePeerRequestedTxs policy peeraddr peerState' sharedState' - writeTVar sharedStateVar sharedState'' + writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState'' return peerState' -- | Process a batch of tx bodies received from this peer. 
diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 86bd680c332..a5fcb0ff8af 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -74,8 +74,13 @@ mkPeerActionContext now policy peeraddr peerState sharedState = where -- Remove expireds TX keys from the shared state sharedState' = - let expiredRetainedKeys = retainedExpiredKeys now (sharedRetainedTxs sharedState) in - dropTxKeys expiredRetainedKeys sharedState + let expiredRetainedKeys = retainedExpiredKeys now (sharedRetainedTxs sharedState) + prunedSharedState = dropTxKeys expiredRetainedKeys sharedState in + if IntSet.null expiredRetainedKeys + then sharedState + else prunedSharedState { + sharedGeneration = sharedGeneration sharedState + 1 + } -- Remove downloaded tx bodies that are no longer in the shared state. 
peerState' = @@ -542,21 +547,25 @@ acknowledgeTxIds :: (Ord peeraddr, Ord txid) -> SharedTxState peeraddr txid acknowledgeTxIds _ [] st = st acknowledgeTxIds peeraddr acknowledgedTxIds st = - foldl' acknowledgeOne st' acknowledgedTxIds + case foldl' acknowledgeOne (False, st) acknowledgedTxIds of + (False, _) -> st + (True, st') -> st' { sharedGeneration = sharedGeneration st + 1 } where - st' = st { sharedGeneration = sharedGeneration st + 1 } - - removeAdvertiser txEntry@TxEntry { txAdvertisers } = - txEntry { txAdvertisers = Map.delete peeraddr txAdvertisers } - - acknowledgeOne acc (TxKey k) = + acknowledgeOne (sharedChanged, acc) (TxKey k) = case IntMap.lookup k (sharedTxTable acc) of - Just txEntry -> - let txEntry' = removeAdvertiser txEntry in - if activeTxLive txEntry' - then acc { sharedTxTable = IntMap.insert k txEntry' (sharedTxTable acc) } - else dropTxKey k acc - Nothing -> acc + Just txEntry@TxEntry { txAdvertisers } -> + case Map.updateLookupWithKey (\_ _ -> Nothing) peeraddr txAdvertisers of + (Just _, txAdvertisers') -> + let txEntry' = txEntry { txAdvertisers = txAdvertisers' } + acc' = + if activeTxLive txEntry' + then acc { sharedTxTable = IntMap.insert k txEntry' (sharedTxTable acc) } + else dropTxKey k acc + in (True, acc') + (Nothing, _) -> + (sharedChanged, acc) + Nothing -> + (sharedChanged, acc) -- | Determine if an unacknowledged txid is ready to be acknowledged. 
-- @@ -990,6 +999,7 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , sharedTxTable' , sharedRetainedTxs' , peersToWake + , sharedChanged ) = foldl' step @@ -1001,6 +1011,7 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , sharedTxTable sharedState , sharedRetainedTxs sharedState , Set.empty + , False ) txidsAndSizes @@ -1012,20 +1023,19 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize peerAvailableTxIds = peerAvailableTxIds' } - sharedState' = sharedState { - sharedTxIdToKey = sharedTxIdToKey', - sharedKeyToTxId = sharedKeyToTxId', - sharedNextTxKey = sharedNextTxKey', - sharedTxTable = sharedTxTable', - sharedRetainedTxs = sharedRetainedTxs' - } - - sharedState'' = - bumpIdlePeerGenerations peersToWake sharedState' { - sharedTxTable = sharedTxTable', - sharedRetainedTxs = sharedRetainedTxs', - sharedGeneration = sharedGeneration sharedState' + 1 - } + sharedState'' + | sharedChanged = + bumpIdlePeerGenerations peersToWake $ + sharedState { + sharedTxIdToKey = sharedTxIdToKey', + sharedKeyToTxId = sharedKeyToTxId', + sharedNextTxKey = sharedNextTxKey', + sharedTxTable = sharedTxTable', + sharedRetainedTxs = sharedRetainedTxs', + sharedGeneration = sharedGeneration sharedState + 1 + } + | otherwise = + sharedState retainUntil = addTime (bufferedTxsMinLifetime policy) now @@ -1039,6 +1049,7 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , IntMap.IntMap (TxEntry peeraddr) , RetainedTxs , Set.Set peeraddr + , Bool ) -> (txid, SizeInBytes) -> ( StrictSeq.StrictSeq TxKey @@ -1049,6 +1060,7 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , IntMap.IntMap (TxEntry peeraddr) , RetainedTxs , Set.Set peeraddr + , Bool ) step ( unacknowledgedAcc @@ -1059,6 +1071,7 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , txTableAcc , retainedAcc , peersAcc + , 
sharedChangedAcc ) (txid, txSize) | retainedMember k retainedAcc = @@ -1070,6 +1083,7 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , txTableAcc , retainedAcc , peersAcc + , sharedChangedAcc ) | mempoolHasTx txid = let wakePeers = case IntMap.lookup k txTableAcc of @@ -1083,6 +1097,7 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , IntMap.delete k txTableAcc , retainedInsertMax k retainUntil retainedAcc , wakePeers + , True ) | otherwise = case IntMap.lookup k txTableAcc of @@ -1101,9 +1116,10 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , IntMap.insert k txEntry txTableAcc , retainedAcc , peersAcc + , True ) Just txEntry -> - let txEntry' = addAdvertiser txSize txEntry + let (entryChanged, txEntry') = addAdvertiser txSize txEntry in ( unacknowledgedAcc StrictSeq.|> txKey , IntMap.insert k txSize availableAcc , txIdToKeyAcc' @@ -1112,6 +1128,7 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , IntMap.insert k txEntry' txTableAcc , retainedAcc , peersAcc + , sharedChangedAcc || entryChanged ) where (txKey@(TxKey k), txIdToKeyAcc', keyToTxIdAcc', nextTxKeyAcc') = @@ -1126,10 +1143,14 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , nextTxKeyAcc + 1 ) - addAdvertiser txSize' txEntry@TxEntry { txAdvertisers } - | Map.member peeraddr txAdvertisers = - txEntry - | otherwise = - txEntry { - txAdvertisers = Map.insert peeraddr (TxAdvertiser AckWhenResolved txSize') txAdvertisers - } + addAdvertiser txSize' txEntry@TxEntry { txAdvertisers } = + case Map.insertLookupWithKey (\_ _ old -> old) + peeraddr + (TxAdvertiser AckWhenResolved txSize') + txAdvertisers of + (Nothing, txAdvertisers') -> + ( True + , txEntry { txAdvertisers = txAdvertisers' } + ) + (Just _, _) -> + (False, txEntry) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs 
b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index f36787129d0..29b1fb2085b 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -43,6 +43,7 @@ tests = testGroup "TxLogic" [ testProperty "handleReceivedTxIds inserts new tx entries" prop_handleReceivedTxIds_newEntries , testProperty "handleReceivedTxIds resolves txids already in mempool" prop_handleReceivedTxIds_knownToMempool + , testProperty "handleReceivedTxIds keeps retained txids local-only" prop_handleReceivedTxIds_retainedIsLocalOnly , testProperty "handleReceivedTxs buffers received and drops omitted txs" prop_handleReceivedTxs_buffersAndDropsOmitted , testProperty "handleReceivedTxs drops late bodies already retained or in mempool" prop_handleReceivedTxs_dropsLateBodies , testProperty "handleReceivedTxs penalizes omitted txs after full prune" prop_handleReceivedTxs_penalizesOmittedAfterPrune @@ -309,6 +310,37 @@ prop_handleReceivedTxIds_knownToMempool (Positive peeraddr) txid0 txSize0 = key = lookupKeyOrFail txid sharedState' expectedRetainUntil = addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now +-- Verifies that txids already retained in shared state only update the peer's +-- local queue and do not dirty the shared state again. 
+prop_handleReceivedTxIds_retainedIsLocalOnly + :: Positive Int + -> TxId + -> Positive Int + -> Property +prop_handleReceivedTxIds_retainedIsLocalOnly (Positive peeraddr) txid0 txSize0 = + conjoin + [ peerRequestedTxIds peerState' === 0 + , peerAvailableTxIds peerState' === IntMap.empty + , toList (peerUnacknowledgedTxIds peerState') === [key] + , sharedState' === sharedState0 + ] + where + txid = abs txid0 + 1 + txSize = mkSize txSize0 + key = TxKey 0 + k = unTxKey key + retainUntil = addTime 17 now + peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = 1 } + sharedState0 = emptySharedTxState + { sharedRetainedTxs = retainedSingleton k retainUntil + , sharedTxIdToKey = Map.singleton txid key + , sharedKeyToTxId = IntMap.singleton k txid + , sharedNextTxKey = 1 + , sharedGeneration = 7 + } + (peerState', sharedState') = + handleReceivedTxIds (const False) now defaultTxDecisionPolicy peeraddr 1 [(txid, txSize)] peerState0 sharedState0 + -- Verifies that handleReceivedTxs buffers received bodies and removes omitted -- requested txs from peer and shared state. 
prop_handleReceivedTxs_buffersAndDropsOmitted @@ -1049,7 +1081,7 @@ prop_nextPeerActionPipelined_requestsTxIds (Positive peeraddr) txid0 _txSize0 = , sharedTxTable sharedState' === sharedTxTable sharedState0 , sharedTxIdToKey sharedState' === sharedTxIdToKey sharedState0 , sharedKeyToTxId sharedState' === sharedKeyToTxId sharedState0 - , sharedGeneration sharedState' === sharedGeneration sharedState0 + 1 + , sharedGeneration sharedState' === sharedGeneration sharedState0 ] _ -> counterexample ("unexpected pipelined action: " ++ show peerAction) False where @@ -1205,6 +1237,7 @@ prop_nextPeerAction_prunesExpiredRetained (Positive peeraddr) txid0 _txSize0 = , retainedLookup k (sharedRetainedTxs sharedState') === Nothing , Map.lookup txid (sharedTxIdToKey sharedState') === Nothing , IntMap.lookup k (sharedKeyToTxId sharedState') === Nothing + , sharedGeneration sharedState' === sharedGeneration sharedState0 + 1 ] _ -> counterexample ("unexpected peer action: " ++ show peerAction) False where @@ -1239,6 +1272,7 @@ prop_nextPeerAction_keepsRetained (Positive peeraddr) txid0 _txSize0 = , Map.lookup txid (sharedTxIdToKey sharedState') === Just key , IntMap.lookup k (sharedKeyToTxId sharedState') === Just txid , wakeDelay === diffTime retainUntil now + , sharedGeneration sharedState' === sharedGeneration sharedState0 ] _ -> counterexample ("unexpected peer action: " ++ show peerAction) False where From d6119a28d59fb598db5cf06873b2c7dcbf0baf9d Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Thu, 9 Apr 2026 20:12:20 +0200 Subject: [PATCH 06/67] fixup: cleanup some shared state Remove shared states that where only written to. 
--- .../TxSubmission/Inbound/V2/Registry.hs | 51 +--- .../Network/TxSubmission/Inbound/V2/State.hs | 8 +- .../Network/TxSubmission/Inbound/V2/Types.hs | 8 +- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 240 ++++-------------- 4 files changed, 67 insertions(+), 240 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index 32920353863..dd8ff40eca7 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -12,7 +12,6 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.Registry , withPeer -- Exported for testing , updatePeerPhase - , updatePeerRequestedTxs ) where import Control.Concurrent.Class.MonadSTM qualified as Lazy @@ -25,7 +24,6 @@ import Data.IntMap.Strict qualified as IntMap import Data.IntSet qualified as IntSet import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map -import Data.Sequence.Strict qualified as StrictSeq import Data.Set qualified as Set import Data.Void (Void) import Data.Word (Word64) @@ -190,9 +188,7 @@ withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar sharedPeerState = SharedPeerState { sharedPeerPhase = PeerIdle, sharedPeerScore = emptyPeerScore now, - sharedPeerGeneration = 0, - sharedPeerRequestedTxBatches = 0, - sharedPeerRequestedTxsSize = 0 + sharedPeerGeneration = 0 } unregisterPeer :: SharedTxState peeraddr txid -> SharedTxState peeraddr txid @@ -309,8 +305,7 @@ runNextPeerActionImp policy sharedStateVar peeraddr now peerState = atomically $ (peerAction, peerState', sharedState') = State.nextPeerAction now policy peeraddr peerState sharedState sharedState'' = updatePeerPhase peeraddr (peerPhaseForActionIdle peerAction) sharedState' - sharedState''' = updatePeerRequestedTxs policy peeraddr peerState' sharedState'' - writeSharedStateIfChanged 
sharedStateVar sharedGeneration0 sharedState''' + writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState'' return (peerAction, peerState') -- | Compute the next action for this peer in pipelined mode. @@ -335,8 +330,7 @@ runNextPeerActionPipelinedImp policy sharedStateVar peeraddr now peerState = ato sharedState'' = updatePeerPhase peeraddr (peerPhaseForActionPipelined peeraddr peerAction sharedState') sharedState' - sharedState''' = updatePeerRequestedTxs policy peeraddr peerState' sharedState'' - writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState''' + writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState'' return (peerAction, peerState') -- | Process a batch of txids received from this peer. @@ -363,8 +357,7 @@ applyReceivedTxIdsImp policy mempoolGetSnapshot sharedStateVar peeraddr now txId let sharedGeneration0 = sharedGeneration sharedState (peerState', sharedState') = State.handleReceivedTxIds mempoolHasTx now policy peeraddr txIdsToReq txidsAndSizes peerState sharedState - sharedState'' = updatePeerRequestedTxs policy peeraddr peerState' sharedState' - writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState'' + writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' return peerState' -- | Process a batch of tx bodies received from this peer. 
@@ -389,11 +382,10 @@ applyReceivedTxsImp policy mempoolGetSnapshot sharedStateVar countersVar peeradd peerState = atomically $ do MempoolSnapshot { mempoolHasTx } <- mempoolGetSnapshot sharedState <- readTVar sharedStateVar + let sharedGeneration0 = sharedGeneration sharedState let (omittedCount, lateCount, peerState', sharedState') = State.handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState - sharedState'' = - updatePeerRequestedTxs policy peeraddr peerState' sharedState' - writeTVar sharedStateVar sharedState'' + writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' modifyTVar countersVar (<> mempty { txRepliesReceived = 1, txsReceived = fromIntegral (length txs), @@ -422,10 +414,10 @@ applySubmittedTxsImp :: ( MonadSTM m applySubmittedTxsImp policy sharedStateVar peeraddr now acceptedTxs rejectedTxs peerState = atomically $ do sharedState <- readTVar sharedStateVar + let sharedGeneration0 = sharedGeneration sharedState let (peerState', sharedState') = State.handleSubmittedTxs now policy peeraddr acceptedTxs rejectedTxs peerState sharedState - sharedState'' = updatePeerRequestedTxs policy peeraddr peerState' sharedState' - writeTVar sharedStateVar sharedState'' + writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' return peerState' -- | Update the peer's rejection score based on the number of txs rejected @@ -548,33 +540,6 @@ updatePeerPhase peeraddr peerPhaseNew st@SharedTxState { sharedPeers, sharedGene | otherwise = Set.empty --- | Update the peer's shared TX state so that it is in sync with its local state. 
-updatePeerRequestedTxs - :: Ord peeraddr - => TxDecisionPolicy - -> peeraddr - -> PeerTxLocalState tx - -> SharedTxState peeraddr txid - -> SharedTxState peeraddr txid -updatePeerRequestedTxs _policy peeraddr peerState - st@SharedTxState { sharedPeers, sharedGeneration } = - case Map.lookup peeraddr sharedPeers of - Just sharedPeerState -> - if sharedPeerRequestedTxBatches sharedPeerState /= requestedTxBatches - || sharedPeerRequestedTxsSize sharedPeerState /= requestedTxsSize - then - let sharedPeerState' = sharedPeerState { - sharedPeerRequestedTxBatches = requestedTxBatches - , sharedPeerRequestedTxsSize = requestedTxsSize } - sharedPeers' = Map.insert peeraddr sharedPeerState' sharedPeers in - st { sharedPeers = sharedPeers' - , sharedGeneration = sharedGeneration + 1 } - else st - _ -> st -- TODO: error? - where - requestedTxBatches = StrictSeq.length (peerRequestedTxBatches peerState) - requestedTxsSize = peerRequestedTxsSize peerState - advertisersForPeerTxsExcept :: Ord peeraddr => peeraddr diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index a5fcb0ff8af..2b74e761479 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -1104,7 +1104,7 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize Nothing -> let txEntry = TxEntry { txLease = TxLeased peeraddr (addTime (interTxSpace policy) now), - txAdvertisers = Map.singleton peeraddr (TxAdvertiser AckWhenBuffered txSize), + txAdvertisers = Map.singleton peeraddr (TxAdvertiser AckWhenBuffered), txTieBreakSalt = k, txAttempts = Map.empty } @@ -1119,7 +1119,7 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , True ) Just txEntry -> - let (entryChanged, txEntry') = addAdvertiser txSize txEntry + let (entryChanged, txEntry') = 
addAdvertiser txEntry in ( unacknowledgedAcc StrictSeq.|> txKey , IntMap.insert k txSize availableAcc , txIdToKeyAcc' @@ -1143,10 +1143,10 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , nextTxKeyAcc + 1 ) - addAdvertiser txSize' txEntry@TxEntry { txAdvertisers } = + addAdvertiser txEntry@TxEntry { txAdvertisers } = case Map.insertLookupWithKey (\_ _ old -> old) peeraddr - (TxAdvertiser AckWhenResolved txSize') + (TxAdvertiser AckWhenResolved) txAdvertisers of (Nothing, txAdvertisers') -> ( True diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index 61e27eb1240..7235eea33be 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -113,10 +113,8 @@ data TxOwnerAckState -- | Per-peer advertisement state for a tx. -- --- The advertised size must be tracked per peer. 
data TxAdvertiser = TxAdvertiser { - txAckState :: !TxOwnerAckState, - txAdvertisedSize :: !SizeInBytes + txAckState :: !TxOwnerAckState } deriving stock (Eq, Show, Generic) @@ -329,9 +327,7 @@ emptyPeerTxLocalState = PeerTxLocalState { data SharedPeerState = SharedPeerState { sharedPeerPhase :: !PeerPhase, sharedPeerScore :: !PeerScore, - sharedPeerGeneration :: !Word64, - sharedPeerRequestedTxBatches :: !Int, - sharedPeerRequestedTxsSize :: !SizeInBytes + sharedPeerGeneration :: !Word64 } deriving stock (Eq, Show, Generic) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 29b1fb2085b..d0164f01af5 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -26,7 +26,7 @@ import Data.Word (Word64) import Ouroboros.Network.Protocol.TxSubmission2.Type import Ouroboros.Network.TxSubmission.Inbound.V2.Policy import Ouroboros.Network.TxSubmission.Inbound.V2.Registry - (updatePeerPhase, updatePeerRequestedTxs) + (updatePeerPhase) import Ouroboros.Network.TxSubmission.Inbound.V2.State import Ouroboros.Network.TxSubmission.Inbound.V2.Types @@ -68,8 +68,6 @@ tests = , testProperty "handleSubmittedTxs bumps idle advertiser generations" prop_handleSubmittedTxs_bumpsIdleAdvertisers , testCaseSteps "updatePeerPhase only wakes the peer becoming idle" unit_updatePeerPhase_wakesOnlyBecomingIdlePeer , testCaseSteps "updatePeerPhase wakes competing idle advertisers when a peer leaves idle" unit_updatePeerPhase_wakesCompetingAdvertisers - , testCaseSteps "updatePeerRequestedTxs does not wake advertisers when the owner becomes saturated" unit_updatePeerRequestedTxs_ignoresSaturation - , testCaseSteps "updatePeerRequestedTxs ignores unsaturated inflight changes" unit_updatePeerRequestedTxs_ignoresUnsaturatedChanges ] -- @@ -205,10 +203,7 @@ instance 
Arbitrary ArbSharedPeerState where | otherwise = [ ArbSharedPeerState defaultPeerState , ArbSharedPeerState peerState - { sharedPeerGeneration = 0 - , sharedPeerRequestedTxBatches = 0 - , sharedPeerRequestedTxsSize = 0 - } + { sharedPeerGeneration = 0 } ] where defaultPeerState = mkSharedPeerState PeerIdle (emptyPeerScore now) @@ -273,13 +268,13 @@ prop_handleReceivedTxIds_newEntries (Positive peeraddr) (ArbSharedTxState shared checkExistingTxId (txid, txKey) = Map.lookup txid (sharedTxIdToKey sharedState') === Just txKey - checkEntry (txid, txSize) = + checkEntry (txid, _) = case IntMap.lookup (unTxKey (lookupKeyOrFail txid sharedState')) (sharedTxTable sharedState') of Nothing -> counterexample ("missing tx entry for " ++ show txid) False Just TxEntry { txLease, txAdvertisers, txAttempts } -> conjoin [ txLease === TxLeased peeraddr expectedLeaseUntil - , txAdvertisers === Map.singleton peeraddr (mkAdvertiser AckWhenBuffered txSize) + , txAdvertisers === Map.singleton peeraddr (mkAdvertiser AckWhenBuffered) , txAttempts === Map.empty ] @@ -576,7 +571,7 @@ prop_nextPeerAction_prioritisesSubmit (Positive peeraddr) txid0 txSize0 = { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxLeased peeraddr (addTime 10 now) - , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenBuffered txSize) + , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenBuffered) , txTieBreakSalt = txid , txAttempts = Map.singleton peeraddr TxBuffered } @@ -631,7 +626,7 @@ prop_nextPeerAction_claimsClaimableTx (Positive peerA0) (Positive peerB0) (Posit , sharedNextTxKey = 1 , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxClaimable - , txAdvertisers = mkAdvertisers txSize + , txAdvertisers = mkAdvertisers [ (peerA, AckWhenResolved) , (peerB, AckWhenResolved) , (peerC, AckWhenResolved) @@ -683,7 +678,7 @@ prop_nextPeerAction_claimsExpiredLease (Positive oldOwner0) (Positive 
peerA0) (P , sharedNextTxKey = 1 , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxLeased oldOwner (Time 0) - , txAdvertisers = mkAdvertisers txSize + , txAdvertisers = mkAdvertisers [ (oldOwner, AckWhenResolved) , (peerA, AckWhenResolved) , (peerB, AckWhenResolved) @@ -762,19 +757,19 @@ unit_nextPeerAction_skipsBlockedAvailableTxs step = do sharedState :: SharedTxState PeerAddr TxId sharedState = emptySharedTxState { sharedPeers = Map.fromList - [ (peeraddr, SharedPeerState PeerIdle (emptyPeerScore testNow) 0 0 0) - , (otherPeer, SharedPeerState PeerIdle (emptyPeerScore testNow) 0 0 0) + [ (peeraddr, SharedPeerState PeerIdle (emptyPeerScore testNow) 0) + , (otherPeer, SharedPeerState PeerIdle (emptyPeerScore testNow) 0) ] , sharedTxTable = IntMap.fromList [ (kBlocked, TxEntry { txLease = TxLeased otherPeer (addTime 10 testNow) - , txAdvertisers = mkAdvertisers 10 [(peeraddr, AckWhenResolved), (otherPeer, AckWhenResolved)] + , txAdvertisers = mkAdvertisers [(peeraddr, AckWhenResolved), (otherPeer, AckWhenResolved)] , txTieBreakSalt = 0 , txAttempts = Map.empty }) , (kClaimable, TxEntry { txLease = TxClaimable - , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved 11) + , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved) , txTieBreakSalt = 0 , txAttempts = Map.empty }) @@ -809,7 +804,7 @@ prop_nextPeerAction_ownerSubmitsBuffered (Positive peeraddr) txid0 txSize0 = { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxClaimable - , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenBuffered txSize) + , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenBuffered) , txTieBreakSalt = txid , txAttempts = Map.singleton peeraddr TxBuffered } @@ -855,7 +850,7 @@ unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx step = do blockedTx = mkTx blockedTxid blockedSize blockedEntry = TxEntry { txLease = TxLeased 
peeraddr (addTime 10 now) - , txAdvertisers = mkAdvertisers blockedSize + , txAdvertisers = mkAdvertisers [ (peeraddr, AckWhenBuffered) , (submittingPeer, AckWhenResolved) ] @@ -874,7 +869,7 @@ unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx step = do [ (kBlocked, blockedEntry) , (kClaimable, TxEntry { txLease = TxClaimable - , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved claimableSize) + , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved) , txTieBreakSalt = claimableTxid , txAttempts = Map.empty }) @@ -928,7 +923,7 @@ unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx step = do blockedTx = mkTx blockedTxid blockedSize blockedEntry = TxEntry { txLease = TxLeased peeraddr (addTime 10 now) - , txAdvertisers = mkAdvertisers blockedSize + , txAdvertisers = mkAdvertisers [ (peeraddr, AckWhenBuffered) , (submittingPeer, AckWhenResolved) ] @@ -968,9 +963,8 @@ prop_nextPeerAction_nonOwnerWaitsUntilResolved :: Positive Int -> Positive Int -> TxId - -> Positive Int -> Property -prop_nextPeerAction_nonOwnerWaitsUntilResolved (Positive owner0) (Positive peeraddr0) txid0 txSize0 = +prop_nextPeerAction_nonOwnerWaitsUntilResolved (Positive owner0) (Positive peeraddr0) txid0 = owner /= peeraddr ==> conjoin [ case unresolvedAction of @@ -994,14 +988,13 @@ prop_nextPeerAction_nonOwnerWaitsUntilResolved (Positive owner0) (Positive peera owner = owner0 + 1000 peeraddr = peeraddr0 + 2000 txid = abs txid0 + 1 - txSize = mkSize txSize0 key = TxKey 0 k = unTxKey key sharedPeers0 = Map.fromList [ (owner, mkSharedPeerState PeerIdle (emptyPeerScore now)) , (peeraddr, mkSharedPeerState PeerIdle (emptyPeerScore now)) ] - txAdvertisers0 = mkAdvertisers txSize [(owner, AckWhenBuffered), (peeraddr, AckWhenResolved)] + txAdvertisers0 = mkAdvertisers [(owner, AckWhenBuffered), (peeraddr, AckWhenResolved)] unresolvedSharedState = emptySharedTxState { sharedPeers = sharedPeers0 , sharedTxTable = IntMap.singleton k TxEntry @@ -1147,7 
+1140,7 @@ prop_nextPeerActionPipelined_secondBodyBatch (Positive peeraddr) txidA0 txidB0 t [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading)) , (kB, TxEntry { txLease = TxClaimable - , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved txSizeB) + , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved) , txTieBreakSalt = txidB , txAttempts = Map.empty }) @@ -1210,7 +1203,7 @@ prop_nextPeerActionPipelined_noThirdBodyBatch (Positive peeraddr) txidA0 txidB0 , (kB, mkTxEntry peeraddr txSizeB (Just TxDownloading)) , (kC, TxEntry { txLease = TxClaimable - , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved txSizeC) + , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved) , txTieBreakSalt = txidC , txAttempts = Map.empty }) @@ -1301,7 +1294,7 @@ prop_nextPeerAction_earliestWakeDelay -> Positive Int -> Positive Int -> Property -prop_nextPeerAction_earliestWakeDelay (Positive peeraddr) (Positive owner0) txidA0 txidB0 txSizeA0 _txSizeB0 = +prop_nextPeerAction_earliestWakeDelay (Positive peeraddr) (Positive owner0) txidA0 txidB0 _txSizeA0 _txSizeB0 = peeraddr /= owner ==> conjoin [ case leaseFirstAction of @@ -1315,7 +1308,6 @@ prop_nextPeerAction_earliestWakeDelay (Positive peeraddr) (Positive owner0) txid owner = owner0 + 1000 txidA = abs txidA0 + 1 txidB = abs txidB0 + 2 - txSizeA = mkSize txSizeA0 leaseUntil = addTime 11 now retainUntilLater = addTime 29 now leaseUntilLater = addTime 31 now @@ -1331,7 +1323,7 @@ prop_nextPeerAction_earliestWakeDelay (Positive peeraddr) (Positive owner0) txid { sharedPeers = sharedPeers0 , sharedTxTable = IntMap.singleton (unTxKey keyA) TxEntry { txLease = TxLeased owner leaseUntil - , txAdvertisers = mkAdvertisers txSizeA [(owner, AckWhenBuffered), (peeraddr, AckWhenResolved)] + , txAdvertisers = mkAdvertisers [(owner, AckWhenBuffered), (peeraddr, AckWhenResolved)] , txTieBreakSalt = txidA , txAttempts = Map.empty } @@ -1344,7 +1336,7 @@ 
prop_nextPeerAction_earliestWakeDelay (Positive peeraddr) (Positive owner0) txid { sharedPeers = sharedPeers0 , sharedTxTable = IntMap.singleton (unTxKey keyA) TxEntry { txLease = TxLeased owner leaseUntilLater - , txAdvertisers = mkAdvertisers txSizeA [(owner, AckWhenBuffered), (peeraddr, AckWhenResolved)] + , txAdvertisers = mkAdvertisers [(owner, AckWhenBuffered), (peeraddr, AckWhenResolved)] , txTieBreakSalt = txidA , txAttempts = Map.empty } @@ -1418,7 +1410,7 @@ prop_handleSubmittedTxs_bumpsIdleAdvertisers (Positive owner0) (Positive peerA0) ] , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxLeased owner (addTime 10 now) - , txAdvertisers = mkAdvertisers txSize + , txAdvertisers = mkAdvertisers [ (owner, AckWhenBuffered) , (peerA, AckWhenResolved) , (peerB, AckWhenResolved) @@ -1464,7 +1456,6 @@ unit_updatePeerPhase_wakesCompetingAdvertisers step = do competingPeer = 2 unrelatedPeer = 3 txid = 1 - txSize = 10 baseState = mkSharedState [txid] key = lookupKeyOrFail txid baseState k = unTxKey key @@ -1476,7 +1467,7 @@ unit_updatePeerPhase_wakesCompetingAdvertisers step = do ] , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxClaimable - , txAdvertisers = mkAdvertisers txSize + , txAdvertisers = mkAdvertisers [ (leavingPeer, AckWhenResolved) , (competingPeer, AckWhenResolved) ] @@ -1486,129 +1477,17 @@ unit_updatePeerPhase_wakesCompetingAdvertisers step = do } sharedState' = updatePeerPhase leavingPeer PeerWaitingTxs sharedState0 -unit_updatePeerRequestedTxs_ignoresSaturation :: (String -> IO ()) -> Assertion -unit_updatePeerRequestedTxs_ignoresSaturation step = do - step "Update requested-tx counters across the saturation threshold" - sharedPeerRequestedTxBatches (lookupPeerOrFail owner sharedState') @?= 2 - step "Assert saturation no longer wakes competing advertisers" - sharedPeerGeneration (lookupPeerOrFail owner sharedState') @?= 3 - sharedPeerGeneration (lookupPeerOrFail competingPeer sharedState') @?= 7 - sharedPeerGeneration 
(lookupPeerOrFail unrelatedPeer sharedState') @?= 13 - where - owner = 1 - competingPeer = 2 - unrelatedPeer = 3 - txid = 1 - txSize = 10 - policy = - defaultTxDecisionPolicy { - maxOutstandingTxBatchesPerPeer = 2 - } - baseState = mkSharedState [txid] - key = lookupKeyOrFail txid baseState - k = unTxKey key - sharedState0 = baseState - { sharedPeers = Map.fromList - [ (owner, (mkSharedPeerState PeerWaitingTxs (emptyPeerScore now)) { - sharedPeerGeneration = 3, - sharedPeerRequestedTxBatches = 1, - sharedPeerRequestedTxsSize = txSize - }) - , (competingPeer, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 7 }) - , (unrelatedPeer, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 13 }) - ] - , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxLeased owner (addTime 10 now) - , txAdvertisers = mkAdvertisers txSize - [ (owner, AckWhenBuffered) - , (competingPeer, AckWhenResolved) - ] - , txTieBreakSalt = txid - , txAttempts = Map.empty - } - } - peerState' = emptyPeerTxLocalState - { peerRequestedTxBatches = - StrictSeq.fromList - [ mkRequestedTxBatch [key] txSize - , mkRequestedTxBatch [TxKey 42] txSize - ] - , peerRequestedTxsSize = txSize * 2 - } - sharedState' = updatePeerRequestedTxs policy owner peerState' sharedState0 - -unit_updatePeerRequestedTxs_ignoresUnsaturatedChanges :: (String -> IO ()) -> Assertion -unit_updatePeerRequestedTxs_ignoresUnsaturatedChanges step = do - step "Update requested-tx counters without saturating the owner" - sharedPeerRequestedTxBatches (lookupPeerOrFail owner sharedState') @?= 2 - step "Assert unsaturated inflight changes do not wake any peer" - sharedPeerGeneration (lookupPeerOrFail owner sharedState') @?= 3 - sharedPeerGeneration (lookupPeerOrFail competingPeer sharedState') @?= 7 - sharedPeerGeneration (lookupPeerOrFail unrelatedPeer sharedState') @?= 13 - where - owner = 1 - competingPeer = 2 - unrelatedPeer = 3 - txid = 1 - txSize = 10 - policy = - 
defaultTxDecisionPolicy { - maxOutstandingTxBatchesPerPeer = 3 - } - baseState = mkSharedState [txid] - key = lookupKeyOrFail txid baseState - k = unTxKey key - sharedState0 = baseState - { sharedPeers = Map.fromList - [ (owner, (mkSharedPeerState PeerWaitingTxs (emptyPeerScore now)) { - sharedPeerGeneration = 3, - sharedPeerRequestedTxBatches = 1, - sharedPeerRequestedTxsSize = txSize - }) - , (competingPeer, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 7 }) - , (unrelatedPeer, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 13 }) - ] - , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxLeased owner (addTime 10 now) - , txAdvertisers = mkAdvertisers txSize - [ (owner, AckWhenBuffered) - , (competingPeer, AckWhenResolved) - ] - , txTieBreakSalt = txid - , txAttempts = Map.empty - } - } - peerState' = emptyPeerTxLocalState - { peerRequestedTxBatches = - StrictSeq.fromList - [ mkRequestedTxBatch [key] txSize - , mkRequestedTxBatch [TxKey 42] txSize - ] - , peerRequestedTxsSize = txSize * 2 - } - sharedState' = updatePeerRequestedTxs policy owner peerState' sharedState0 - --- Generate a peer state that respects phase-specific inflight fields. +-- Generate a shared peer state. 
genSharedPeerState :: Gen SharedPeerState genSharedPeerState = do sharedPeerPhase <- elements [PeerIdle, PeerWaitingTxIds, PeerWaitingTxs, PeerSubmittingToMempool] peerScoreValue <- choose (0 :: Double, 100) peerScoreTs <- genSmallTime sharedPeerGeneration <- genSmallWord64 - (sharedPeerRequestedTxBatches, sharedPeerRequestedTxsSize) <- - case sharedPeerPhase of - PeerWaitingTxs -> do - batches <- chooseInt (1, 3) - totalSize <- genPositiveSize - pure (batches, totalSize) - _ -> - pure (0, 0) pure SharedPeerState { sharedPeerPhase, sharedPeerScore = PeerScore peerScoreValue peerScoreTs, - sharedPeerGeneration, - sharedPeerRequestedTxBatches, - sharedPeerRequestedTxsSize + sharedPeerGeneration } -- Generate a self-consistent local peer view of requested, available, and downloaded txs. @@ -1678,9 +1557,8 @@ data PeerSeed = PeerSeed { } data PeerDerivedUsage = PeerDerivedUsage { - peerHasSubmitting :: !Bool - , peerRequestedBatches :: !Int - , peerRequestedTxsBytes :: !SizeInBytes + peerHasSubmitting :: !Bool + , peerHasRequestedTxs :: !Bool } -- Generate a shared tx state with distinct active and retained entries. @@ -1772,21 +1650,19 @@ genOwnedAdvertisers genOwnedAdvertisers advertiserPeers owner = Map.fromList <$> mapM genAdvertiser advertiserPeers where - genAdvertiser peeraddr = do - txAdvertisedSize <- genPositiveSize + genAdvertiser peeraddr = let txAckState | peeraddr == owner = AckWhenBuffered | otherwise = AckWhenResolved - pure (peeraddr, TxAdvertiser { txAckState, txAdvertisedSize }) + in pure (peeraddr, TxAdvertiser { txAckState }) -- Generate advertisers that all acknowledge on resolution. 
genResolvedAdvertisers :: [PeerAddr] -> Gen (Map.Map PeerAddr TxAdvertiser) genResolvedAdvertisers advertiserPeers = Map.fromList <$> mapM genAdvertiser advertiserPeers where - genAdvertiser peeraddr = do - txAdvertisedSize <- genPositiveSize - pure (peeraddr, TxAdvertiser { txAckState = AckWhenResolved, txAdvertisedSize }) + genAdvertiser peeraddr = + pure (peeraddr, TxAdvertiser { txAckState = AckWhenResolved }) -- Rebuild a shared state from tx-centric fixtures while preserving interned keys. buildSharedTxState @@ -1813,7 +1689,7 @@ buildSharedTxState peerSeeds activeEntries retainedEntries sharedGeneration = where baseState = mkSharedState (fmap fst activeEntries <> fmap fst retainedEntries) --- Derive peer phases and inflight counters from the generated tx entries. +-- Derive peer phases from the generated tx entries. deriveSharedPeers :: Map.Map PeerAddr PeerSeed -> [(TxId, TxEntry PeerAddr)] @@ -1833,19 +1709,16 @@ deriveSharedPeers peerSeeds activeEntries = buildPeerState peeraddr PeerSeed { peerSeedScore, peerSeedGeneration } = let PeerDerivedUsage { peerHasSubmitting, - peerRequestedBatches, - peerRequestedTxsBytes + peerHasRequestedTxs } = Map.findWithDefault emptyPeerDerivedUsage peeraddr peerUsages sharedPeerPhase | peerHasSubmitting = PeerSubmittingToMempool - | peerRequestedBatches > 0 = PeerWaitingTxs + | peerHasRequestedTxs = PeerWaitingTxs | otherwise = PeerIdle in SharedPeerState { sharedPeerPhase, sharedPeerScore = peerSeedScore, - sharedPeerGeneration = peerSeedGeneration, - sharedPeerRequestedTxBatches = peerRequestedBatches, - sharedPeerRequestedTxsSize = peerRequestedTxsBytes + sharedPeerGeneration = peerSeedGeneration } defaultPeerSeed = @@ -1859,8 +1732,7 @@ emptyPeerDerivedUsage :: PeerDerivedUsage emptyPeerDerivedUsage = PeerDerivedUsage { peerHasSubmitting = False, - peerRequestedBatches = 0, - peerRequestedTxsBytes = 0 + peerHasRequestedTxs = False } -- Fold one tx entry's attempts into the derived per-peer usage map. 
@@ -1868,31 +1740,28 @@ accumulatePeerUsage :: Map.Map PeerAddr PeerDerivedUsage -> (TxId, TxEntry PeerAddr) -> Map.Map PeerAddr PeerDerivedUsage -accumulatePeerUsage acc (_, TxEntry { txAdvertisers, txAttempts }) = +accumulatePeerUsage acc (_, TxEntry { txAttempts }) = foldl' step acc (Map.toList txAttempts) where step acc' (peeraddr, attempt) = case attempt of TxDownloading -> - updatePeerUsage peeraddr False 1 (advertisedSize peeraddr) acc' + updatePeerUsage peeraddr False True acc' TxSubmitting -> - updatePeerUsage peeraddr True 0 0 acc' + updatePeerUsage peeraddr True False acc' TxBuffered -> - updatePeerUsage peeraddr False 0 0 acc' + updatePeerUsage peeraddr False False acc' TxNoAttempt -> acc' - advertisedSize peeraddr = maybe 0 txAdvertisedSize (Map.lookup peeraddr txAdvertisers) - -- Merge one peer's submitting and inflight usage into the accumulator. updatePeerUsage :: PeerAddr -> Bool - -> Int - -> SizeInBytes + -> Bool -> Map.Map PeerAddr PeerDerivedUsage -> Map.Map PeerAddr PeerDerivedUsage -updatePeerUsage peeraddr submitting requestedBatches requestedTxsBytes acc = +updatePeerUsage peeraddr submitting hasRequestedTxs acc = Map.insert peeraddr usage' acc where usage = @@ -1900,8 +1769,7 @@ updatePeerUsage peeraddr submitting requestedBatches requestedTxsBytes acc = usage' = usage { peerHasSubmitting = peerHasSubmitting usage || submitting, - peerRequestedBatches = peerRequestedBatches usage + requestedBatches, - peerRequestedTxsBytes = peerRequestedTxsBytes usage + requestedTxsBytes + peerHasRequestedTxs = peerHasRequestedTxs usage || hasRequestedTxs } -- Collect every peer mentioned by a tx entry. @@ -2062,15 +1930,13 @@ mkTx txid txSize = Tx , getTxParent = Nothing } --- Construct a peer fixture with zeroed generation and inflight counters. +-- Construct a peer fixture with zeroed generation. 
mkSharedPeerState :: PeerPhase -> PeerScore -> SharedPeerState mkSharedPeerState sharedPeerPhase sharedPeerScore = SharedPeerState { sharedPeerPhase, sharedPeerScore, - sharedPeerGeneration = 0, - sharedPeerRequestedTxBatches = 0, - sharedPeerRequestedTxsSize = 0 + sharedPeerGeneration = 0 } -- Intern a list of txids into an otherwise empty shared state. @@ -2078,12 +1944,12 @@ mkSharedState :: [TxId] -> SharedTxState PeerAddr TxId mkSharedState txids = snd (internTxIds txids emptySharedTxState) -- Construct a single advertiser entry. -mkAdvertiser :: TxOwnerAckState -> SizeInBytes -> TxAdvertiser -mkAdvertiser txAckState txAdvertisedSize = TxAdvertiser { txAckState, txAdvertisedSize } +mkAdvertiser :: TxOwnerAckState -> TxAdvertiser +mkAdvertiser txAckState = TxAdvertiser { txAckState } --- Construct an advertiser map that shares one advertised size across peers. -mkAdvertisers :: SizeInBytes -> [(PeerAddr, TxOwnerAckState)] -> Map.Map PeerAddr TxAdvertiser -mkAdvertisers txSize = Map.fromList . fmap (\(peeraddr, txAckState) -> (peeraddr, mkAdvertiser txAckState txSize)) +-- Construct an advertiser map for a set of peers. +mkAdvertisers :: [(PeerAddr, TxOwnerAckState)] -> Map.Map PeerAddr TxAdvertiser +mkAdvertisers = Map.fromList . fmap (\(peeraddr, txAckState) -> (peeraddr, mkAdvertiser txAckState)) -- Construct a requested batch together with its cached key set. mkRequestedTxBatch :: [TxKey] -> SizeInBytes -> RequestedTxBatch @@ -2094,9 +1960,9 @@ mkRequestedTxBatch keys requestedTxBatchSize = RequestedTxBatch -- Construct a leased tx entry owned by one peer. 
mkTxEntry :: PeerAddr -> SizeInBytes -> Maybe TxAttemptState -> TxEntry PeerAddr -mkTxEntry peeraddr txSize mAttempt = TxEntry +mkTxEntry peeraddr _txSize mAttempt = TxEntry { txLease = TxLeased peeraddr (addTime 10 now) - , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenBuffered txSize) + , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenBuffered) , txTieBreakSalt = 0 , txAttempts = maybe Map.empty (Map.singleton peeraddr) mAttempt } From 88dabca233e74f43c370340c45ef537e41d5b883 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 10 Apr 2026 08:45:46 +0200 Subject: [PATCH 07/67] fixup: strictness in foldl accumulators --- .../Network/TxSubmission/Inbound/V2/State.hs | 29 ++++++++++--------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 2b74e761479..a802829ecc9 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE BangPatterns #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -548,24 +549,24 @@ acknowledgeTxIds :: (Ord peeraddr, Ord txid) acknowledgeTxIds _ [] st = st acknowledgeTxIds peeraddr acknowledgedTxIds st = case foldl' acknowledgeOne (False, st) acknowledgedTxIds of - (False, _) -> st + (False, _) -> st (True, st') -> st' { sharedGeneration = sharedGeneration st + 1 } where - acknowledgeOne (sharedChanged, acc) (TxKey k) = + acknowledgeOne acc0@(_, acc) (TxKey k) = case IntMap.lookup k (sharedTxTable acc) of Just txEntry@TxEntry { txAdvertisers } -> case Map.updateLookupWithKey (\_ _ -> Nothing) peeraddr txAdvertisers of (Just _, txAdvertisers') -> let txEntry' = txEntry { txAdvertisers = txAdvertisers' } - acc' = + !acc' = if activeTxLive txEntry' then acc { sharedTxTable = IntMap.insert k 
txEntry' (sharedTxTable acc) } else dropTxKey k acc in (True, acc') (Nothing, _) -> - (sharedChanged, acc) + acc0 Nothing -> - (sharedChanged, acc) + acc0 -- | Determine if an unacknowledged txid is ready to be acknowledged. -- @@ -1063,15 +1064,15 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , Bool ) step - ( unacknowledgedAcc - , availableAcc - , txIdToKeyAcc - , keyToTxIdAcc - , nextTxKeyAcc - , txTableAcc - , retainedAcc - , peersAcc - , sharedChangedAcc + ( !unacknowledgedAcc + , !availableAcc + , !txIdToKeyAcc + , !keyToTxIdAcc + , !nextTxKeyAcc + , !txTableAcc + , !retainedAcc + , !peersAcc + , !sharedChangedAcc ) (txid, txSize) | retainedMember k retainedAcc = From 3a11a1f2e201291f9587311c9d2948b6bccc01db Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 10 Apr 2026 09:34:57 +0200 Subject: [PATCH 08/67] fixup: NFData instances --- .../Network/TxSubmission/Inbound/V2/Types.hs | 35 +++++++++++++------ 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index 7235eea33be..de47cebfecb 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE ExistentialQuantification #-} @@ -67,6 +68,7 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.Types , emptySharedTxState ) where +import Control.DeepSeq (NFData) import Control.Exception (Exception (..)) import Control.Monad.Class.MonadTime.SI import Data.Map.Strict (Map) @@ -98,7 +100,7 @@ const_MAX_TX_SIZE_DISCREPANCY = 32 -- | Compact internal transaction key used by V2 state. 
newtype TxKey = TxKey { unTxKey :: Int } deriving stock (Eq, Ord, Show, Generic) - deriving newtype (Enum) + deriving newtype (Enum, NFData) -- | State which determines when a peer that advertised a txid may -- acknowledge it. @@ -110,13 +112,15 @@ data TxOwnerAckState = AckWhenBuffered | AckWhenResolved deriving stock (Eq, Ord, Show, Generic) + deriving anyclass NFData -- | Per-peer advertisement state for a tx. -- -data TxAdvertiser = TxAdvertiser { - txAckState :: !TxOwnerAckState +newtype TxAdvertiser = TxAdvertiser { + txAckState :: TxOwnerAckState } deriving stock (Eq, Show, Generic) + deriving newtype NFData -- | Per-peer attempt state for one tx body. @@ -143,6 +147,7 @@ data TxAttemptState -- system (either accepted into the mempool or rejected). TxSubmitting deriving stock (Eq, Ord, Show, Generic) + deriving anyclass NFData -- | The current download lease for a tx body. -- @@ -151,6 +156,7 @@ data TxAttemptState data TxLease peeraddr = TxLeased !peeraddr !Time | TxClaimable deriving stock (Eq, Show, Generic) + deriving anyclass NFData -- | Shared per-tx state. -- @@ -160,18 +166,19 @@ data TxLease peeraddr = TxLeased !peeraddr !Time -- and another eligible advertiser may atomically claim it. data TxEntry peeraddr = TxEntry { -- | Current owner lease for downloading the tx body. - txLease :: !(TxLease peeraddr), + txLease :: !(TxLease peeraddr), -- | Peers that have advertised this tx. - txAdvertisers :: !(Map peeraddr TxAdvertiser), + txAdvertisers :: !(Map peeraddr TxAdvertiser), -- | Stable salt used to break ties between equally scored advertisers. - txTieBreakSalt :: !Int, + txTieBreakSalt :: !Int, -- | Current per-peer attempt state for this tx body. - txAttempts :: !(Map peeraddr TxAttemptState) + txAttempts :: !(Map peeraddr TxAttemptState) } deriving stock (Eq, Show, Generic) + deriving anyclass NFData -- | The next peer-local action chosen by the V2 worker thread. -- @@ -191,6 +198,7 @@ data PeerAction -- mempool. 
PeerSubmitTxs ![TxKey] deriving stock (Eq, Show, Generic) + deriving anyclass NFData -- | A batch of transaction body requests sent to a peer. @@ -206,6 +214,7 @@ data RequestedTxBatch = RequestedTxBatch { , requestedTxBatchSize :: !SizeInBytes } deriving stock (Eq, Show, Generic) + deriving anyclass NFData -- | Coarse phase of a peer worker thread. -- @@ -221,6 +230,7 @@ data PeerPhase | -- | The peer worker is submitting buffered txs to the local mempool. PeerSubmittingToMempool deriving stock (Eq, Ord, Show, Generic) + deriving anyclass NFData -- | Peer usefulness score. -- @@ -230,6 +240,7 @@ data PeerScore = PeerScore { peerScoreTs :: !Time } deriving stock (Eq, Show, Generic) + deriving anyclass NFData -- | Low-cost monotonic counters for the V2 peer protocol and coordination path. -- @@ -250,6 +261,7 @@ data TxSubmissionCounters = TxSubmissionCounters { lateBodies :: !Word64 } deriving stock (Eq, Show, Generic) + deriving anyclass NFData instance Semigroup TxSubmissionCounters where a <> b = TxSubmissionCounters { @@ -310,6 +322,7 @@ data PeerTxLocalState tx = PeerTxLocalState { peerDownloadedTxs :: !(IntMap tx) } deriving stock (Eq, Show, Generic) + deriving anyclass NFData emptyPeerTxLocalState :: PeerTxLocalState tx emptyPeerTxLocalState = PeerTxLocalState { @@ -325,11 +338,12 @@ emptyPeerTxLocalState = PeerTxLocalState { -- | Small shared view of peer state used for lease claiming and peer -- selection. data SharedPeerState = SharedPeerState { - sharedPeerPhase :: !PeerPhase, - sharedPeerScore :: !PeerScore, - sharedPeerGeneration :: !Word64 + sharedPeerPhase :: !PeerPhase, + sharedPeerScore :: !PeerScore, + sharedPeerGeneration :: !Word64 } deriving stock (Eq, Show, Generic) + deriving anyclass NFData -- | Shared V2 state. 
-- @@ -349,6 +363,7 @@ data SharedTxState peeraddr txid = SharedTxState { sharedGeneration :: !Word64 } deriving stock (Eq, Show, Generic) + deriving anyclass NFData type RetainedTxs = IntPSQ Time () From 8a4b501819c72279a6224e10349321e3326b11c3 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 10 Apr 2026 10:43:58 +0200 Subject: [PATCH 09/67] fixup: better score calculations Have the peer update its score during phase change. This makes State's idle calculation quicker since peers will drain back to 0. Mark pacIdlePeerScores as lazy to avoid doing the calculations when it isn't needed. --- .../TxSubmission/Inbound/V2/Registry.hs | 30 +++++++++++++++---- .../Network/TxSubmission/Inbound/V2/State.hs | 4 ++- 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index dd8ff40eca7..2afda455fdd 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -304,7 +304,8 @@ runNextPeerActionImp policy sharedStateVar peeraddr now peerState = atomically $ let sharedGeneration0 = sharedGeneration sharedState (peerAction, peerState', sharedState') = State.nextPeerAction now policy peeraddr peerState sharedState - sharedState'' = updatePeerPhase peeraddr (peerPhaseForActionIdle peerAction) sharedState' + sharedState'' = updatePeerPhase now policy peeraddr + (peerPhaseForActionIdle peerAction) sharedState' writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState'' return (peerAction, peerState') @@ -327,7 +328,7 @@ runNextPeerActionPipelinedImp policy sharedStateVar peeraddr now peerState = ato let sharedGeneration0 = sharedGeneration sharedState (peerAction, peerState', sharedState') = State.nextPeerActionPipelined now policy peeraddr peerState sharedState - sharedState'' = 
updatePeerPhase peeraddr + sharedState'' = updatePeerPhase now policy peeraddr (peerPhaseForActionPipelined peeraddr peerAction sharedState') sharedState' writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState'' @@ -502,7 +503,8 @@ resolveBufferedTxsImp sharedStateVar peerState txKeys = atomically $ do -- | Update a peer's phase. -- --- A phase change always bumps the shared generation. In addition: +-- A phase change always bumps the shared generation and normalizes the moving +-- peer's score by draining it to @now@. In addition: -- -- * When a peer becomes 'PeerIdle', bump that peer's own generation so a -- 'PeerDoNothing' action computed before the phase change does not put that @@ -515,23 +517,39 @@ resolveBufferedTxsImp sharedStateVar peerState txKeys = atomically $ do -- peer should wake and claim a tx. updatePeerPhase :: Ord peeraddr - => peeraddr + => Time + -> TxDecisionPolicy + -> peeraddr -> PeerPhase -> SharedTxState peeraddr txid -> SharedTxState peeraddr txid -updatePeerPhase peeraddr peerPhaseNew st@SharedTxState { sharedPeers, sharedGeneration } = +updatePeerPhase now policy peeraddr peerPhaseNew + st@SharedTxState { sharedPeers, sharedGeneration } = case Map.lookup peeraddr sharedPeers of Just sharedPeerState -> let peerPhaseOld = sharedPeerPhase sharedPeerState in if peerPhaseOld /= peerPhaseNew then + let sharedPeerScore' = + normalizePeerScore (sharedPeerScore sharedPeerState) + sharedPeerState' = + sharedPeerState { + sharedPeerPhase = peerPhaseNew, + sharedPeerScore = sharedPeerScore' + } + in let st' = st { sharedPeers = Map.insert peeraddr - (sharedPeerState { sharedPeerPhase = peerPhaseNew }) sharedPeers + sharedPeerState' sharedPeers , sharedGeneration = sharedGeneration + 1 } in bumpIdlePeerGenerations (phaseWakePeers peerPhaseOld) st' else st _ -> st -- TODO error? 
where + normalizePeerScore ps@PeerScore { peerScoreValue, peerScoreTs } = + let !drain = realToFrac (diffTime now peerScoreTs) * scoreRate policy + !drained = max 0 (peerScoreValue - drain) + in ps { peerScoreValue = drained, peerScoreTs = now } + phaseWakePeers peerPhaseOld | peerPhaseOld /= PeerIdle , peerPhaseNew == PeerIdle = Set.singleton peeraddr diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index a802829ecc9..defe32d6dc1 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -44,7 +44,9 @@ data PeerActionContext peeraddr txid tx = PeerActionContext { -- | Shared tx-submission state after shared pruning has been applied. pacSharedState :: !(SharedTxState peeraddr txid), -- | Decayed scores for peers that are currently idle and eligible to claim work. - pacIdlePeerScores :: !(Map.Map peeraddr Double) + -- Note that this field is lazy to avoid calculating the idle scores when it + -- isn't needed. 
+ pacIdlePeerScores :: (Map.Map peeraddr Double) } data PeerActionChoice peeraddr = From 87e9aa5060dbf4d18659df64cb4beb5cbf4205e5 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 10 Apr 2026 11:34:56 +0200 Subject: [PATCH 10/67] fixup: benchmarks Add benchmarks for V2 --- ouroboros-network/bench/Main.hs | 104 ++----- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 265 +++++++++++++++++- 2 files changed, 284 insertions(+), 85 deletions(-) diff --git a/ouroboros-network/bench/Main.hs b/ouroboros-network/bench/Main.hs index db7b6a15d28..59ea89e4f96 100644 --- a/ouroboros-network/bench/Main.hs +++ b/ouroboros-network/bench/Main.hs @@ -1,29 +1,18 @@ {-# LANGUAGE NumericUnderscores #-} --- pPrint -{-# OPTIONS_GHC -Wno-unused-imports #-} - module Main (main) where -import Control.DeepSeq +import Control.DeepSeq (NFData, rnf) import Control.Exception (evaluate) -import Data.Map (Map) -import Debug.Trace (traceMarkerIO) import System.Mem (performMajorGC) -import System.Random.SplitMix qualified as SM import Test.Tasty.Bench -import Text.Pretty.Simple (pPrint) - --- Disabled pending a replacement benchmark suite for the peer-driven V2 --- tx-submission scheduler. --- import Ouroboros.Network.TxSubmission.Inbound.V2.Decision qualified as Tx --- import Ouroboros.Network.TxSubmission.Inbound.V2.Policy --- import Ouroboros.Network.TxSubmission.Inbound.V2.State (SharedTxState (..)) import Test.Ouroboros.Network.PeerSelection.PeerMetric (microbenchmark1GenerateInput, microbenchmark1ProcessInput) -import Test.Ouroboros.Network.TxSubmission.TxLogic as TX -import Test.Ouroboros.Network.TxSubmission.Types +import Test.Ouroboros.Network.TxSubmission.TxLogic qualified as TX + +benchLoops :: Int +benchLoops = 1_0000 main :: IO () main = @@ -37,73 +26,22 @@ main = , env (microbenchmark1GenerateInput False 100_000) $ \i -> bench "100k" $ nfAppIO microbenchmark1ProcessInput i ] - {- Disabled until TxLogic has replacement peer-driven V2 benchmarks. 
- , bgroup "TxLogic" - [ env (do let a = TX.mkDecisionContext (SM.mkSMGen 131) 10 - evaluate (rnf a) - -- pPrint a - performMajorGC - traceMarkerIO "evaluated decision context" - return a - ) - (\(~a@(_policy, state)) -> - bench "makeDecisions: 10" - $ let f :: (TxDecisionPolicy, SharedTxState PeerAddr TxId (Tx TxId)) - -> ( SharedTxState PeerAddr TxId (Tx TxId) - , Map PeerAddr (Tx.TxDecision TxId (Tx TxId)) - ) - f = flip (uncurry Tx.makeDecisions) (peerTxStates state) - in nf f a - ) - , env (do let a = TX.mkDecisionContext (SM.mkSMGen 131) 100 - evaluate (rnf a) - -- pPrint a - performMajorGC - traceMarkerIO "evaluated decision context" - return a - ) - (\(~a@(_policy, state)) -> - bench "makeDecisions: 100" - $ let f :: ( TxDecisionPolicy - , SharedTxState PeerAddr TxId (Tx TxId) - ) - -> ( SharedTxState PeerAddr TxId (Tx TxId) - , Map PeerAddr (Tx.TxDecision TxId (Tx TxId)) - ) - f = flip (uncurry Tx.makeDecisions) (peerTxStates state) - in nf f a - ) - , env (do let a = TX.mkDecisionContext (SM.mkSMGen 361) 1_000 - evaluate (rnf a) - -- pPrint a - performMajorGC - traceMarkerIO "evaluated decision context" - return a - ) - (\(~a@(_policy, state)) -> - bench "makeDecisions: 1000" - $ let f :: ( TxDecisionPolicy - , SharedTxState PeerAddr TxId (Tx TxId) - ) - -> ( SharedTxState PeerAddr TxId (Tx TxId) - , Map PeerAddr (Tx.TxDecision TxId (Tx TxId)) - ) - f = flip (uncurry Tx.makeDecisions) (peerTxStates state) - in nf f a - ) - , env (do - smGen <- SM.initSMGen - print smGen - let a = TX.mkDecisionContext smGen 1000 - evaluate (rnf a) - traceMarkerIO "evaluated decision context" - return a - ) - (\a -> - bench "makeDecisions: random" - $ nf (uncurry Tx.makeDecisions) a - ) + , bgroup "TxSubmissionV2" + [ env (prepareEnv (TX.mkReceiveDuplicateFixture 100 3)) $ \fixture -> + bench "handleReceivedTxIds/duplicate-active/100existing/3txids/x1000" $ + nfAppIO (TX.runReceiveDuplicateLoop benchLoops) fixture + , env (prepareEnv (TX.mkForeignRejectedFixture 100 10)) $ 
\fixture -> + bench "nextPeerAction/ack-foreign-rejected/100advertisers/10txids/x1000" $ + nfAppIO (TX.runPeerActionLoop benchLoops) fixture + , env (prepareEnv (TX.mkFanoutFixture 100 3)) $ \fixture -> + bench "scenario/fanout/100peers/3txids/x1000" $ + nfAppIO (TX.runFanoutLoop benchLoops) fixture ] - -} ] ] + +prepareEnv :: NFData a => a -> IO a +prepareEnv a = do + _ <- evaluate (rnf a) + performMajorGC + pure a diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index d0164f01af5..670d6935690 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE BangPatterns #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE RankNTypes #-} @@ -8,10 +9,21 @@ module Test.Ouroboros.Network.TxSubmission.TxLogic , ArbSharedTxState (..) , ArbSharedPeerState (..) , ArbPeerTxLocalState (..) + , ReceiveDuplicateFixture + , PeerActionFixture + , FanoutFixture + , mkReceiveDuplicateFixture + , mkForeignRejectedFixture + , mkFanoutFixture + , runReceiveDuplicateLoop + , runPeerActionLoop + , runFanoutLoop , sharedTxStateInvariant , InvariantStrength (..) 
) where +import Control.DeepSeq (NFData (rnf)) +import Control.Exception (evaluate) import Control.Monad.Class.MonadTime.SI (Time (..), addTime, diffTime) import Data.Foldable (foldl', toList) import Data.Function (on) @@ -76,6 +88,49 @@ tests = type PeerAddr = Int +data ReceiveDuplicateFixture = ReceiveDuplicateFixture + { rdfPeerAddr :: !PeerAddr + , rdfRequestedTxIds :: !NumTxIdsToReq + , rdfTxidsAndSizes :: ![(TxId, SizeInBytes)] + , rdfPeerState :: !(PeerTxLocalState (Tx TxId)) + , rdfSharedState :: !(SharedTxState PeerAddr TxId) + } + +data PeerActionFixture = PeerActionFixture + { pafPeerAddr :: !PeerAddr + , pafPeerState :: !(PeerTxLocalState (Tx TxId)) + , pafSharedState :: !(SharedTxState PeerAddr TxId) + } + +data FanoutFixture = FanoutFixture + { ffPeers :: ![PeerAddr] + , ffRequestedTxIds :: !NumTxIdsToReq + , ffTxidsAndSizes :: ![(TxId, SizeInBytes)] + , ffInitialSharedState :: !(SharedTxState PeerAddr TxId) + } + +instance NFData ReceiveDuplicateFixture where + rnf ReceiveDuplicateFixture { rdfPeerAddr, rdfRequestedTxIds, rdfTxidsAndSizes + , rdfPeerState, rdfSharedState } = + rnf rdfPeerAddr + `seq` rnf rdfRequestedTxIds + `seq` rnf rdfTxidsAndSizes + `seq` rnf rdfPeerState + `seq` rnf rdfSharedState + +instance NFData PeerActionFixture where + rnf PeerActionFixture { pafPeerAddr, pafPeerState, pafSharedState } = + rnf pafPeerAddr + `seq` rnf pafPeerState + `seq` rnf pafSharedState + +instance NFData FanoutFixture where + rnf FanoutFixture { ffPeers, ffRequestedTxIds, ffTxidsAndSizes, ffInitialSharedState } = + rnf ffPeers + `seq` rnf ffRequestedTxIds + `seq` rnf ffTxidsAndSizes + `seq` rnf ffInitialSharedState + data InvariantStrength = WeakInvariant | StrongInvariant deriving (Eq, Show) @@ -1441,7 +1496,7 @@ unit_updatePeerPhase_wakesOnlyBecomingIdlePeer step = do , (other, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 11 }) ] } - sharedState' = updatePeerPhase peer PeerIdle sharedState0 + sharedState' = 
updatePeerPhase now defaultTxDecisionPolicy peer PeerIdle sharedState0 unit_updatePeerPhase_wakesCompetingAdvertisers :: (String -> IO ()) -> Assertion unit_updatePeerPhase_wakesCompetingAdvertisers step = do @@ -1475,7 +1530,7 @@ unit_updatePeerPhase_wakesCompetingAdvertisers step = do , txAttempts = Map.empty } } - sharedState' = updatePeerPhase leavingPeer PeerWaitingTxs sharedState0 + sharedState' = updatePeerPhase now defaultTxDecisionPolicy leavingPeer PeerWaitingTxs sharedState0 -- Generate a shared peer state. genSharedPeerState :: Gen SharedPeerState @@ -2009,3 +2064,209 @@ firstFreshTxId used = go go txid | IntSet.member txid used = go (txid + 1) | otherwise = txid + +mkReceiveDuplicateFixture :: Int -> Int -> ReceiveDuplicateFixture +mkReceiveDuplicateFixture existingAdvertisers txidCount = + ReceiveDuplicateFixture + { rdfPeerAddr = targetPeer + , rdfRequestedTxIds = fromIntegral txidCount + , rdfTxidsAndSizes = txidsAndSizes + , rdfPeerState = + emptyPeerTxLocalState { + peerRequestedTxIds = fromIntegral txidCount + } + , rdfSharedState = + mkActiveSharedState allPeers ownerPeer existingPeers False txidsAndSizes + } + where + ownerPeer = 0 + targetPeer = existingAdvertisers + existingPeers = [1 .. existingAdvertisers - 1] + allPeers = ownerPeer : targetPeer : existingPeers + txidsAndSizes = mkTxidsAndSizes txidCount + +mkForeignRejectedFixture :: Int -> Int -> PeerActionFixture +mkForeignRejectedFixture advertiserCount txidCount = + PeerActionFixture + { pafPeerAddr = targetPeer + , pafPeerState = + emptyPeerTxLocalState { + peerUnacknowledgedTxIds = StrictSeq.fromList txKeys + } + , pafSharedState = sharedState + } + where + ownerPeer = 0 + targetPeer = 1 + otherPeers = [2 .. advertiserCount - 1] + allPeers = [0 .. 
advertiserCount - 1] + txidsAndSizes = mkTxidsAndSizes txidCount + sharedState = + mkActiveSharedState allPeers ownerPeer (targetPeer : otherPeers) True txidsAndSizes + txKeys = fmap (`lookupKeyOrFail` sharedState) (fmap fst txidsAndSizes) + +mkFanoutFixture :: Int -> Int -> FanoutFixture +mkFanoutFixture peerCount txidCount = + FanoutFixture + { ffPeers = peers + , ffRequestedTxIds = fromIntegral txidCount + , ffTxidsAndSizes = txidsAndSizes + , ffInitialSharedState = + mkActiveSharedState allPeers ownerPeer [] False txidsAndSizes + } + where + ownerPeer = 0 + peers = [1 .. peerCount] + allPeers = ownerPeer : peers + txidsAndSizes = mkTxidsAndSizes txidCount + +runReceiveDuplicateLoop :: Int -> ReceiveDuplicateFixture -> IO () +runReceiveDuplicateLoop iterations ReceiveDuplicateFixture + { rdfPeerAddr + , rdfRequestedTxIds + , rdfTxidsAndSizes + , rdfPeerState + , rdfSharedState + } = + go iterations + where + go 0 = pure () + go n = do + let result = + handleReceivedTxIds + (const False) + now + defaultTxDecisionPolicy + rdfPeerAddr + rdfRequestedTxIds + rdfTxidsAndSizes + rdfPeerState + rdfSharedState + _ <- evaluate (rnf result) + go (n - 1) + +runPeerActionLoop :: Int -> PeerActionFixture -> IO () +runPeerActionLoop iterations PeerActionFixture + { pafPeerAddr + , pafPeerState + , pafSharedState + } = + go iterations + where + go 0 = pure () + go n = do + let result = + nextPeerAction now defaultTxDecisionPolicy pafPeerAddr pafPeerState pafSharedState + _ <- evaluate (rnf result) + go (n - 1) + +runFanoutLoop :: Int -> FanoutFixture -> IO () +runFanoutLoop iterations FanoutFixture + { ffPeers + , ffRequestedTxIds + , ffTxidsAndSizes + , ffInitialSharedState + } = + go iterations + where + go 0 = pure () + go n = do + _ <- evaluate (rnf roundResult) + go (n - 1) + + roundResult = + let (!peerStatesRev, !sharedStateAfterReceive) = + foldl' receiveOne ([], ffInitialSharedState) ffPeers + !sharedStateRejected = markAllRejected sharedStateAfterReceive + 
(!ackResultsRev, !sharedStateAfterAck) = + foldl' acknowledgeOne ([], sharedStateRejected) (reverse peerStatesRev) + in (reverse peerStatesRev, reverse ackResultsRev, sharedStateAfterAck) + + receiveOne + :: ([(PeerAddr, PeerTxLocalState (Tx TxId))], SharedTxState PeerAddr TxId) + -> PeerAddr + -> ([(PeerAddr, PeerTxLocalState (Tx TxId))], SharedTxState PeerAddr TxId) + receiveOne (!peerStatesAcc, !sharedStateAcc) peeraddr = + let peerState0 = + emptyPeerTxLocalState { + peerRequestedTxIds = ffRequestedTxIds + } + !(peerState', sharedStateAcc') = + handleReceivedTxIds + (const False) + now + defaultTxDecisionPolicy + peeraddr + ffRequestedTxIds + ffTxidsAndSizes + peerState0 + sharedStateAcc + in ((peeraddr, peerState') : peerStatesAcc, sharedStateAcc') + + acknowledgeOne + :: ([(PeerAddr, PeerAction, PeerTxLocalState (Tx TxId))], SharedTxState PeerAddr TxId) + -> (PeerAddr, PeerTxLocalState (Tx TxId)) + -> ([(PeerAddr, PeerAction, PeerTxLocalState (Tx TxId))], SharedTxState PeerAddr TxId) + acknowledgeOne (!ackResultsAcc, !sharedStateAcc) (peeraddr, peerState0) = + let !(peerAction, peerState', sharedStateAcc') = + nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedStateAcc + in ( (peeraddr, peerAction, peerState') : ackResultsAcc + , sharedStateAcc' + ) + +mkTxidsAndSizes :: Int -> [(TxId, SizeInBytes)] +mkTxidsAndSizes count = + [ (txid, fromIntegral (128 + txid)) + | txid <- [1 .. 
count] + ] + +mkActiveSharedState + :: [PeerAddr] + -> PeerAddr + -> [PeerAddr] + -> Bool + -> [(TxId, SizeInBytes)] + -> SharedTxState PeerAddr TxId +mkActiveSharedState allPeers ownerPeer resolvedAdvertisers rejected txidsAndSizes = + sharedState1 { + sharedTxTable = + IntMap.fromList + [ (unTxKey txKey, mkEntry txKey) + | (txid, _txSize) <- txidsAndSizes + , let txKey = lookupKeyOrFail txid sharedState1 + ] + } + where + sharedState0 = + emptySharedTxState { + sharedPeers = + Map.fromList + [ (peeraddr, mkSharedPeerState PeerIdle (emptyPeerScore now)) + | peeraddr <- allPeers + ] + } + sharedState1 = snd (internTxIds (fmap fst txidsAndSizes) sharedState0) + + advertisers = + Map.fromList $ + (ownerPeer, TxAdvertiser AckWhenBuffered) + : [ (peeraddr, TxAdvertiser AckWhenResolved) + | peeraddr <- resolvedAdvertisers + ] + + mkEntry txKey = TxEntry + { txLease = TxLeased ownerPeer (addTime 10 now) + , txAdvertisers = advertisers + , txTieBreakSalt = unTxKey txKey + , txAttempts = Map.empty + , txMempoolRejected = rejected + } + +markAllRejected :: SharedTxState PeerAddr TxId -> SharedTxState PeerAddr TxId +markAllRejected st@SharedTxState { sharedTxTable, sharedGeneration } = + st { + sharedTxTable = IntMap.map markRejected sharedTxTable, + sharedGeneration = sharedGeneration + 1 + } + where + markRejected txEntry = txEntry { txMempoolRejected = True } From 11a276d2371b181c1de3f81c53dab9822e160b0a Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 10 Apr 2026 14:53:07 +0200 Subject: [PATCH 11/67] fixup: fix test suite --- ouroboros-network/bench/Main.hs | 6 +-- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 44 ++++++++++++------- 2 files changed, 31 insertions(+), 19 deletions(-) diff --git a/ouroboros-network/bench/Main.hs b/ouroboros-network/bench/Main.hs index 59ea89e4f96..4e71c99a770 100644 --- a/ouroboros-network/bench/Main.hs +++ b/ouroboros-network/bench/Main.hs @@ -30,11 +30,11 @@ main = [ env (prepareEnv (TX.mkReceiveDuplicateFixture 100 3)) $ 
\fixture -> bench "handleReceivedTxIds/duplicate-active/100existing/3txids/x1000" $ nfAppIO (TX.runReceiveDuplicateLoop benchLoops) fixture - , env (prepareEnv (TX.mkForeignRejectedFixture 100 10)) $ \fixture -> - bench "nextPeerAction/ack-foreign-rejected/100advertisers/10txids/x1000" $ + , env (prepareEnv (TX.mkResolvedAckFixture 100 10)) $ \fixture -> + bench "nextPeerAction/ack-resolved-retained/100advertisers/10txids/x1000" $ nfAppIO (TX.runPeerActionLoop benchLoops) fixture , env (prepareEnv (TX.mkFanoutFixture 100 3)) $ \fixture -> - bench "scenario/fanout/100peers/3txids/x1000" $ + bench "scenario/fanout-retained/100peers/3txids/x1000" $ nfAppIO (TX.runFanoutLoop benchLoops) fixture ] ] diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 670d6935690..6968d3d7c0e 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -13,6 +13,7 @@ module Test.Ouroboros.Network.TxSubmission.TxLogic , PeerActionFixture , FanoutFixture , mkReceiveDuplicateFixture + , mkResolvedAckFixture , mkForeignRejectedFixture , mkFanoutFixture , runReceiveDuplicateLoop @@ -2076,7 +2077,7 @@ mkReceiveDuplicateFixture existingAdvertisers txidCount = peerRequestedTxIds = fromIntegral txidCount } , rdfSharedState = - mkActiveSharedState allPeers ownerPeer existingPeers False txidsAndSizes + mkActiveSharedState allPeers ownerPeer existingPeers txidsAndSizes } where ownerPeer = 0 @@ -2085,8 +2086,10 @@ mkReceiveDuplicateFixture existingAdvertisers txidCount = allPeers = ownerPeer : targetPeer : existingPeers txidsAndSizes = mkTxidsAndSizes txidCount -mkForeignRejectedFixture :: Int -> Int -> PeerActionFixture -mkForeignRejectedFixture advertiserCount txidCount = +-- Prebuild an ack-only workload after all advertised txs have resolved into +-- retained entries. 
+mkResolvedAckFixture :: Int -> Int -> PeerActionFixture +mkResolvedAckFixture advertiserCount txidCount = PeerActionFixture { pafPeerAddr = targetPeer , pafPeerState = @@ -2101,9 +2104,14 @@ mkForeignRejectedFixture advertiserCount txidCount = otherPeers = [2 .. advertiserCount - 1] allPeers = [0 .. advertiserCount - 1] txidsAndSizes = mkTxidsAndSizes txidCount - sharedState = - mkActiveSharedState allPeers ownerPeer (targetPeer : otherPeers) True txidsAndSizes - txKeys = fmap (`lookupKeyOrFail` sharedState) (fmap fst txidsAndSizes) + sharedState0 = + mkActiveSharedState allPeers ownerPeer (targetPeer : otherPeers) txidsAndSizes + sharedState = retainAllActiveTxs sharedState0 + txKeys = fmap (`lookupKeyOrFail` sharedState0) (fmap fst txidsAndSizes) + +-- | Compatibility alias for the previous benchmark helper name. +mkForeignRejectedFixture :: Int -> Int -> PeerActionFixture +mkForeignRejectedFixture = mkResolvedAckFixture mkFanoutFixture :: Int -> Int -> FanoutFixture mkFanoutFixture peerCount txidCount = @@ -2112,7 +2120,7 @@ mkFanoutFixture peerCount txidCount = , ffRequestedTxIds = fromIntegral txidCount , ffTxidsAndSizes = txidsAndSizes , ffInitialSharedState = - mkActiveSharedState allPeers ownerPeer [] False txidsAndSizes + mkActiveSharedState allPeers ownerPeer [] txidsAndSizes } where ownerPeer = 0 @@ -2177,9 +2185,9 @@ runFanoutLoop iterations FanoutFixture roundResult = let (!peerStatesRev, !sharedStateAfterReceive) = foldl' receiveOne ([], ffInitialSharedState) ffPeers - !sharedStateRejected = markAllRejected sharedStateAfterReceive + !sharedStateResolved = retainAllActiveTxs sharedStateAfterReceive (!ackResultsRev, !sharedStateAfterAck) = - foldl' acknowledgeOne ([], sharedStateRejected) (reverse peerStatesRev) + foldl' acknowledgeOne ([], sharedStateResolved) (reverse peerStatesRev) in (reverse peerStatesRev, reverse ackResultsRev, sharedStateAfterAck) receiveOne @@ -2224,10 +2232,9 @@ mkActiveSharedState :: [PeerAddr] -> PeerAddr -> [PeerAddr] - -> 
Bool -> [(TxId, SizeInBytes)] -> SharedTxState PeerAddr TxId -mkActiveSharedState allPeers ownerPeer resolvedAdvertisers rejected txidsAndSizes = +mkActiveSharedState allPeers ownerPeer resolvedAdvertisers txidsAndSizes = sharedState1 { sharedTxTable = IntMap.fromList @@ -2259,14 +2266,19 @@ mkActiveSharedState allPeers ownerPeer resolvedAdvertisers rejected txidsAndSize , txAdvertisers = advertisers , txTieBreakSalt = unTxKey txKey , txAttempts = Map.empty - , txMempoolRejected = rejected } -markAllRejected :: SharedTxState PeerAddr TxId -> SharedTxState PeerAddr TxId -markAllRejected st@SharedTxState { sharedTxTable, sharedGeneration } = +-- Resolve all active txs into retained entries so non-owner peers may safely +-- acknowledge their txids. +retainAllActiveTxs :: SharedTxState PeerAddr TxId -> SharedTxState PeerAddr TxId +retainAllActiveTxs st@SharedTxState { sharedTxTable, sharedRetainedTxs, sharedGeneration } = st { - sharedTxTable = IntMap.map markRejected sharedTxTable, + sharedTxTable = IntMap.empty, + sharedRetainedTxs = IntMap.foldlWithKey' retainOne sharedRetainedTxs sharedTxTable, sharedGeneration = sharedGeneration + 1 } where - markRejected txEntry = txEntry { txMempoolRejected = True } + retainUntil = addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now + + retainOne retainedAcc k _ = + retainedInsertMax k retainUntil retainedAcc From 1225ed555d043041459716fb205aa59ecbc21007 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 10 Apr 2026 16:39:04 +0200 Subject: [PATCH 12/67] fixup: improve bumpIdlePeerGeneration Instead of traversing all peers only touch the peers that need to wake up. 
--- .../Network/TxSubmission/Inbound/V2/Types.hs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index de47cebfecb..0c3aa994b99 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -474,14 +474,16 @@ bumpIdlePeerGenerations :: Ord peeraddr -> SharedTxState peeraddr txid bumpIdlePeerGenerations peersToWake st@SharedTxState { sharedPeers } = st { - sharedPeers = Map.mapWithKey bumpPeer sharedPeers + sharedPeers = foldl' bumpOne sharedPeers (Set.toList peersToWake) } where - bumpPeer peeraddr sharedPeerState@SharedPeerState { sharedPeerPhase, sharedPeerGeneration } - | Set.member peeraddr peersToWake - , sharedPeerPhase == PeerIdle = - sharedPeerState { sharedPeerGeneration = sharedPeerGeneration + 1 } - | otherwise = sharedPeerState + bumpOne peersMap peeraddr = + Map.adjust bumpIdlePeer peeraddr peersMap + where + bumpIdlePeer sharedPeerState@SharedPeerState { sharedPeerPhase, sharedPeerGeneration } + | sharedPeerPhase == PeerIdle = + sharedPeerState { sharedPeerGeneration = sharedPeerGeneration + 1 } + | otherwise = sharedPeerState lookupTxKey :: Ord txid => txid From cd5f3f2b9ceb5f38fa1455d00a1ed5cf368e33bc Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 10 Apr 2026 17:04:03 +0200 Subject: [PATCH 13/67] fixup: avoid copying the entire advertiser map Avoid copying the map when encountering an existing advertiser. 
--- .../Network/TxSubmission/Inbound/V2/State.hs | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index defe32d6dc1..8d2a8f9fd8b 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -1147,13 +1147,11 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize ) addAdvertiser txEntry@TxEntry { txAdvertisers } = - case Map.insertLookupWithKey (\_ _ old -> old) - peeraddr - (TxAdvertiser AckWhenResolved) - txAdvertisers of - (Nothing, txAdvertisers') -> - ( True - , txEntry { txAdvertisers = txAdvertisers' } - ) - (Just _, _) -> + case Map.lookup peeraddr txAdvertisers of + Just _ -> + -- Peer already an advertiser, avoid copying the map (False, txEntry) + Nothing -> + -- New advertiser, insert and copy + let txAdvertisers' = Map.insert peeraddr (TxAdvertiser AckWhenResolved) txAdvertisers + in (True, txEntry { txAdvertisers = txAdvertisers' }) From 6259531204c049143f3538255c645395f3f270b4 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 10 Apr 2026 17:24:16 +0200 Subject: [PATCH 14/67] fixup: improve applyRequestTxIdsChoice and acknowledgeTxIds --- .../Network/TxSubmission/Inbound/V2/State.hs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 8d2a8f9fd8b..e77933075d8 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -235,7 +235,7 @@ applyRequestTxIdsChoice ctx acknowledgedTxIds txIdsToAcknowledge txIdsToRequest peerState'' = (pacPeerState ctx) { 
peerAvailableTxIds = - foldl' (flip IntMap.delete) (peerAvailableTxIds (pacPeerState ctx)) (unTxKey <$> acknowledgedTxIds), + IntMap.withoutKeys (peerAvailableTxIds (pacPeerState ctx)) (IntSet.fromList $ unTxKey <$> acknowledgedTxIds), peerUnacknowledgedTxIds = unacknowledgedTxIds', peerRequestedTxIds = peerRequestedTxIds (pacPeerState ctx) + txIdsToRequest } @@ -557,15 +557,19 @@ acknowledgeTxIds peeraddr acknowledgedTxIds st = acknowledgeOne acc0@(_, acc) (TxKey k) = case IntMap.lookup k (sharedTxTable acc) of Just txEntry@TxEntry { txAdvertisers } -> - case Map.updateLookupWithKey (\_ _ -> Nothing) peeraddr txAdvertisers of - (Just _, txAdvertisers') -> - let txEntry' = txEntry { txAdvertisers = txAdvertisers' } + -- Check if peer exists before copying the map + case Map.lookup peeraddr txAdvertisers of + Just _ -> + -- Peer exists, remove it and copy + let txAdvertisers' = Map.delete peeraddr txAdvertisers + txEntry' = txEntry { txAdvertisers = txAdvertisers' } !acc' = if activeTxLive txEntry' then acc { sharedTxTable = IntMap.insert k txEntry' (sharedTxTable acc) } else dropTxKey k acc in (True, acc') - (Nothing, _) -> + Nothing -> + -- Peer not an advertiser, no work needed acc0 Nothing -> acc0 From 2f0646026360591cca47c15b1a265d1c87206f11 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 10 Apr 2026 18:24:21 +0200 Subject: [PATCH 15/67] fixup: simpler txAdvertisers We don't need to track the ack state, it can be derived by the time we decide to ack a TX. So txAdvertisers can just be a Set member check. 
--- .../TxSubmission/Inbound/V2/Registry.hs | 12 +-- .../Network/TxSubmission/Inbound/V2/State.hs | 88 ++++++++++--------- .../Network/TxSubmission/Inbound/V2/Types.hs | 2 +- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 66 +++++--------- 4 files changed, 76 insertions(+), 92 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index 2afda455fdd..4a813b23f6c 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -214,7 +214,7 @@ withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar in if txLive txEntry' then ( IntMap.insert k txEntry' txTableAcc , if touched - then Set.union wakeAcc (Set.delete peeraddr (Map.keysSet (txAdvertisers txEntry'))) + then Set.union wakeAcc (Set.delete peeraddr (txAdvertisers txEntry')) else wakeAcc ) else (txTableAcc, wakeAcc) @@ -222,7 +222,7 @@ withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar scrubTxEntry txEntry@TxEntry { txLease, txAdvertisers, txAttempts } = txEntry { txLease = scrubLease txLease, - txAdvertisers = Map.delete peeraddr txAdvertisers, + txAdvertisers = Set.delete peeraddr txAdvertisers, txAttempts = Map.delete peeraddr txAttempts } @@ -233,12 +233,12 @@ withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar txTouchesPeer TxEntry { txLease, txAdvertisers, txAttempts } = leaseOwnedByPeer txLease - || Map.member peeraddr txAdvertisers + || Set.member peeraddr txAdvertisers || Map.member peeraddr txAttempts txLive TxEntry { txLease, txAdvertisers, txAttempts } = leaseLive txLease - || not (Map.null txAdvertisers) + || not (Set.null txAdvertisers) || not (Map.null txAttempts) leaseOwnedByPeer (TxLeased owner _) = owner == peeraddr @@ -567,8 +567,8 @@ advertisersForPeerTxsExcept peeraddr 
SharedTxState { sharedTxTable } = IntMap.foldl' collect Set.empty sharedTxTable where collect peers TxEntry { txAdvertisers } - | Map.member peeraddr txAdvertisers = - Set.union peers (Set.delete peeraddr (Map.keysSet txAdvertisers)) + | Set.member peeraddr txAdvertisers = + Set.union peers (Set.delete peeraddr txAdvertisers) | otherwise = peers diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index e77933075d8..1518f66dd77 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -406,7 +406,7 @@ nextWakeDelay PeerActionContext { pacNow, pacPeerAddr, pacSharedState } = IntMap.foldl' stepLease Nothing (sharedTxTable pacSharedState) stepLease acc txEntry@TxEntry { txLease } = - if Map.member pacPeerAddr (txAdvertisers txEntry) + if Set.member pacPeerAddr (txAdvertisers txEntry) then minMaybe acc (futureLeaseWake txLease) else acc @@ -428,14 +428,11 @@ claimTx :: Ord peeraddr -> Time -> TxEntry peeraddr -> TxEntry peeraddr -claimTx peeraddr leaseUntil txEntry@TxEntry { txAdvertisers, txAttempts } = +claimTx peeraddr leaseUntil txEntry@TxEntry { txAttempts } = txEntry { txLease = TxLeased peeraddr leaseUntil, - txAdvertisers = Map.adjust setAckWhenBuffered peeraddr txAdvertisers, txAttempts = Map.insert peeraddr TxDownloading txAttempts } - where - setAckWhenBuffered advertiser = advertiser { txAckState = AckWhenBuffered } -- | Determine if a tx is eligible for this peer to request. 
-- @@ -450,9 +447,9 @@ txSelectable PeerActionContext { pacNow, pacPeerAddr, pacPolicy, pacIdlePeerScor | txSubmittingAnywhere txEntry = False | txPeerHasAttempt = False | txActiveAttemptCount txEntry >= txInflightMultiplicity pacPolicy = False - | txOwnedByPeer txEntry && Map.member pacPeerAddr txAdvertisers = True + | txOwnedByPeer txEntry && Set.member pacPeerAddr txAdvertisers = True | otherwise = - let peerMayClaim = Map.member pacPeerAddr txAdvertisers && + let peerMayClaim = Set.member pacPeerAddr txAdvertisers && (case pickClaimOwner of Just owner -> owner == pacPeerAddr Nothing -> False) in @@ -470,7 +467,7 @@ txSelectable PeerActionContext { pacNow, pacPeerAddr, pacPolicy, pacIdlePeerScor where eligiblePeers = [ (candidate, score) - | candidate <- Map.keys txAdvertisers + | candidate <- Set.toList txAdvertisers , Just score <- [Map.lookup candidate pacIdlePeerScores] ] @@ -557,20 +554,17 @@ acknowledgeTxIds peeraddr acknowledgedTxIds st = acknowledgeOne acc0@(_, acc) (TxKey k) = case IntMap.lookup k (sharedTxTable acc) of Just txEntry@TxEntry { txAdvertisers } -> - -- Check if peer exists before copying the map - case Map.lookup peeraddr txAdvertisers of - Just _ -> - -- Peer exists, remove it and copy - let txAdvertisers' = Map.delete peeraddr txAdvertisers - txEntry' = txEntry { txAdvertisers = txAdvertisers' } - !acc' = - if activeTxLive txEntry' - then acc { sharedTxTable = IntMap.insert k txEntry' (sharedTxTable acc) } - else dropTxKey k acc - in (True, acc') - Nothing -> - -- Peer not an advertiser, no work needed - acc0 + if Set.member peeraddr txAdvertisers + then + let txAdvertisers' = Set.delete peeraddr txAdvertisers + txEntry' = txEntry { txAdvertisers = txAdvertisers' } + !acc' = + if activeTxLive txEntry' + then acc { sharedTxTable = IntMap.insert k txEntry' (sharedTxTable acc) } + else dropTxKey k acc + in (True, acc') + else + acc0 Nothing -> acc0 @@ -588,18 +582,26 @@ txIdAckable PeerActionContext { pacPeerAddr, pacPeerState, 
pacSharedState } (TxK | retainedMember k (sharedRetainedTxs pacSharedState) = True | otherwise = case IntMap.lookup k (sharedTxTable pacSharedState) of - Just txEntry -> - case Map.lookup pacPeerAddr (txAdvertisers txEntry) of - Just TxAdvertiser { txAckState = AckWhenBuffered } -> - -- Ack the txid if we downloaded it and no other - -- peer is in the process of submitting it to the - -- mempool. - IntMap.member k (peerDownloadedTxs pacPeerState) - && not (txBufferedByPeer pacPeerAddr txEntry - && txSubmittingByOther pacPeerAddr txEntry) - Just TxAdvertiser { txAckState = AckWhenResolved } -> - False -- This becomes ackable once the tx is retained or later pruned. - Nothing -> True -- Safe late ack after this peer was pruned from the shared entry. + Just txEntry@TxEntry { txLease, txAdvertisers, txAttempts } -> + if Set.member pacPeerAddr txAdvertisers + then + let ackWhenBuffered = + case txLease of + TxLeased owner _ -> owner == pacPeerAddr || Map.member pacPeerAddr txAttempts + TxClaimable -> Map.member pacPeerAddr txAttempts + in + if ackWhenBuffered + then + -- Ack the txid if we downloaded it and no other + -- peer is in the process of submitting it to the + -- mempool. + IntMap.member k (peerDownloadedTxs pacPeerState) + && not (txBufferedByPeer pacPeerAddr txEntry + && txSubmittingByOther pacPeerAddr txEntry) + else + False -- This becomes ackable once the tx is retained or later pruned. + else + True -- Safe late ack after this peer was pruned from the shared entry. Nothing -> True -- Safe late ack after the resolved tx was pruned from shared state. -- | Remove one transaction entry from all shared state maps by key. 
@@ -663,7 +665,7 @@ dropDeadActiveKeys keys st@SharedTxState { sharedTxTable } = activeTxLive :: TxEntry peeraddr -> Bool activeTxLive TxEntry { txLease, txAdvertisers, txAttempts } = leaseLive txLease - || not (Map.null txAdvertisers) + || not (Set.null txAdvertisers) || not (Map.null txAttempts) where leaseLive TxClaimable = False @@ -693,7 +695,7 @@ advertisersForEntryExcept :: Ord peeraddr -> TxEntry peeraddr -> Set.Set peeraddr advertisersForEntryExcept currentPeer TxEntry { txAdvertisers } = - Set.delete currentPeer (Map.keysSet txAdvertisers) + Set.delete currentPeer txAdvertisers -- | Handle a batch of tx bodies received from one peer. @@ -876,7 +878,7 @@ handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState = txLease = case txLease of TxLeased owner _ | owner == peeraddr -> TxClaimable _ -> txLease, - txAdvertisers = Map.delete peeraddr txAdvertisers, + txAdvertisers = Set.delete peeraddr txAdvertisers, txAttempts = Map.delete peeraddr txAttempts } @@ -945,7 +947,7 @@ handleSubmittedTxs now policy peeraddr acceptedTxs rejectedTxs peerState sharedS txLease = case txLease of TxLeased owner _ | owner == peeraddr -> TxClaimable _ -> txLease, - txAdvertisers = Map.delete peeraddr txAdvertisers, + txAdvertisers = Set.delete peeraddr txAdvertisers, txAttempts = Map.delete peeraddr txAttempts } @@ -1111,7 +1113,7 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize Nothing -> let txEntry = TxEntry { txLease = TxLeased peeraddr (addTime (interTxSpace policy) now), - txAdvertisers = Map.singleton peeraddr (TxAdvertiser AckWhenBuffered), + txAdvertisers = Set.singleton peeraddr, txTieBreakSalt = k, txAttempts = Map.empty } @@ -1151,11 +1153,11 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize ) addAdvertiser txEntry@TxEntry { txAdvertisers } = - case Map.lookup peeraddr txAdvertisers of - Just _ -> + if Set.member peeraddr txAdvertisers + then -- Peer already an advertiser, avoid 
copying the map (False, txEntry) - Nothing -> + else -- New advertiser, insert and copy - let txAdvertisers' = Map.insert peeraddr (TxAdvertiser AckWhenResolved) txAdvertisers + let txAdvertisers' = Set.insert peeraddr txAdvertisers in (True, txEntry { txAdvertisers = txAdvertisers' }) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index 0c3aa994b99..982381311f2 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -169,7 +169,7 @@ data TxEntry peeraddr = TxEntry { txLease :: !(TxLease peeraddr), -- | Peers that have advertised this tx. - txAdvertisers :: !(Map peeraddr TxAdvertiser), + txAdvertisers :: !(Set.Set peeraddr), -- | Stable salt used to break ties between equally scored advertisers. txTieBreakSalt :: !Int, diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 6968d3d7c0e..8a9ac6abbeb 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -191,7 +191,7 @@ sharedTxStateInvariant strength SharedTxState { activeEntryLive TxEntry { txLease, txAdvertisers, txAttempts } = leaseLive txLease - || not (Map.null txAdvertisers) + || not (Set.null txAdvertisers) || not (Map.null txAttempts) leaseLive TxClaimable = False @@ -200,13 +200,13 @@ sharedTxStateInvariant strength SharedTxState { checkTxEntry (k, txEntry@TxEntry { txLease, txAdvertisers, txAttempts }) = counterexample ("bad active tx entry " ++ show k ++ ": " ++ show txEntry) $ conjoin - [ property (Map.keysSet txAdvertisers `Set.isSubsetOf` knownPeers) - , property (Map.keysSet txAttempts `Set.isSubsetOf` Map.keysSet txAdvertisers) + [ property 
(txAdvertisers `Set.isSubsetOf` knownPeers) + , property (Map.keysSet txAttempts `Set.isSubsetOf` txAdvertisers) , case txLease of TxClaimable -> property True TxLeased owner _ -> - property (Map.member owner sharedPeers && Map.member owner txAdvertisers) + property (Map.member owner sharedPeers && Set.member owner txAdvertisers) ] newtype ArbTxDecisionPolicy = ArbTxDecisionPolicy TxDecisionPolicy @@ -330,7 +330,7 @@ prop_handleReceivedTxIds_newEntries (Positive peeraddr) (ArbSharedTxState shared Just TxEntry { txLease, txAdvertisers, txAttempts } -> conjoin [ txLease === TxLeased peeraddr expectedLeaseUntil - , txAdvertisers === Map.singleton peeraddr (mkAdvertiser AckWhenBuffered) + , txAdvertisers === Set.singleton peeraddr , txAttempts === Map.empty ] @@ -627,7 +627,7 @@ prop_nextPeerAction_prioritisesSubmit (Positive peeraddr) txid0 txSize0 = { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxLeased peeraddr (addTime 10 now) - , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenBuffered) + , txAdvertisers = Set.singleton peeraddr , txTieBreakSalt = txid , txAttempts = Map.singleton peeraddr TxBuffered } @@ -825,7 +825,7 @@ unit_nextPeerAction_skipsBlockedAvailableTxs step = do }) , (kClaimable, TxEntry { txLease = TxClaimable - , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved) + , txAdvertisers = Set.singleton peeraddr , txTieBreakSalt = 0 , txAttempts = Map.empty }) @@ -860,7 +860,7 @@ prop_nextPeerAction_ownerSubmitsBuffered (Positive peeraddr) txid0 txSize0 = { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxClaimable - , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenBuffered) + , txAdvertisers = Set.singleton peeraddr , txTieBreakSalt = txid , txAttempts = Map.singleton peeraddr TxBuffered } @@ -925,7 +925,7 @@ 
unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx step = do [ (kBlocked, blockedEntry) , (kClaimable, TxEntry { txLease = TxClaimable - , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved) + , txAdvertisers = Set.singleton peeraddr , txTieBreakSalt = claimableTxid , txAttempts = Map.empty }) @@ -1196,7 +1196,7 @@ prop_nextPeerActionPipelined_secondBodyBatch (Positive peeraddr) txidA0 txidB0 t [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading)) , (kB, TxEntry { txLease = TxClaimable - , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved) + , txAdvertisers = Set.singleton peeraddr , txTieBreakSalt = txidB , txAttempts = Map.empty }) @@ -1259,7 +1259,7 @@ prop_nextPeerActionPipelined_noThirdBodyBatch (Positive peeraddr) txidA0 txidB0 , (kB, mkTxEntry peeraddr txSizeB (Just TxDownloading)) , (kC, TxEntry { txLease = TxClaimable - , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenResolved) + , txAdvertisers = Set.singleton peeraddr , txTieBreakSalt = txidC , txAttempts = Map.empty }) @@ -1698,27 +1698,18 @@ genClaimableTxEntry peeraddrs txid = do txAttempts = Map.empty } --- Generate advertisers where only the chosen owner uses AckWhenBuffered. +-- Generate the advertiser set for an entry owned by the chosen peer. genOwnedAdvertisers :: [PeerAddr] -> PeerAddr - -> Gen (Map.Map PeerAddr TxAdvertiser) + -> Gen (Set.Set PeerAddr) genOwnedAdvertisers advertiserPeers owner = - Map.fromList <$> mapM genAdvertiser advertiserPeers - where - genAdvertiser peeraddr = - let txAckState - | peeraddr == owner = AckWhenBuffered - | otherwise = AckWhenResolved - in pure (peeraddr, TxAdvertiser { txAckState }) - --- Generate advertisers that all acknowledge on resolution. -genResolvedAdvertisers :: [PeerAddr] -> Gen (Map.Map PeerAddr TxAdvertiser) + pure (Set.fromList (owner : advertiserPeers)) + +-- Generate the advertiser set for a claimable entry. 
+genResolvedAdvertisers :: [PeerAddr] -> Gen (Set.Set PeerAddr) genResolvedAdvertisers advertiserPeers = - Map.fromList <$> mapM genAdvertiser advertiserPeers - where - genAdvertiser peeraddr = - pure (peeraddr, TxAdvertiser { txAckState = AckWhenResolved }) + pure (Set.fromList advertiserPeers) -- Rebuild a shared state from tx-centric fixtures while preserving interned keys. buildSharedTxState @@ -1831,7 +1822,7 @@ updatePeerUsage peeraddr submitting hasRequestedTxs acc = -- Collect every peer mentioned by a tx entry. entryPeers :: TxEntry PeerAddr -> [PeerAddr] entryPeers TxEntry { txLease, txAdvertisers, txAttempts } = - leaseOwner <> Map.keys txAdvertisers <> Map.keys txAttempts + leaseOwner <> Set.toList txAdvertisers <> Map.keys txAttempts where leaseOwner = case txLease of @@ -1999,13 +1990,9 @@ mkSharedPeerState sharedPeerPhase sharedPeerScore = mkSharedState :: [TxId] -> SharedTxState PeerAddr TxId mkSharedState txids = snd (internTxIds txids emptySharedTxState) --- Construct a single advertiser entry. -mkAdvertiser :: TxOwnerAckState -> TxAdvertiser -mkAdvertiser txAckState = TxAdvertiser { txAckState } - --- Construct an advertiser map for a set of peers. -mkAdvertisers :: [(PeerAddr, TxOwnerAckState)] -> Map.Map PeerAddr TxAdvertiser -mkAdvertisers = Map.fromList . fmap (\(peeraddr, txAckState) -> (peeraddr, mkAdvertiser txAckState)) +-- Construct an advertiser set for a group of peers. +mkAdvertisers :: [(PeerAddr, TxOwnerAckState)] -> Set.Set PeerAddr +mkAdvertisers = Set.fromList . fmap fst -- Construct a requested batch together with its cached key set. 
mkRequestedTxBatch :: [TxKey] -> SizeInBytes -> RequestedTxBatch @@ -2018,7 +2005,7 @@ mkRequestedTxBatch keys requestedTxBatchSize = RequestedTxBatch mkTxEntry :: PeerAddr -> SizeInBytes -> Maybe TxAttemptState -> TxEntry PeerAddr mkTxEntry peeraddr _txSize mAttempt = TxEntry { txLease = TxLeased peeraddr (addTime 10 now) - , txAdvertisers = Map.singleton peeraddr (mkAdvertiser AckWhenBuffered) + , txAdvertisers = Set.singleton peeraddr , txTieBreakSalt = 0 , txAttempts = maybe Map.empty (Map.singleton peeraddr) mAttempt } @@ -2254,12 +2241,7 @@ mkActiveSharedState allPeers ownerPeer resolvedAdvertisers txidsAndSizes = } sharedState1 = snd (internTxIds (fmap fst txidsAndSizes) sharedState0) - advertisers = - Map.fromList $ - (ownerPeer, TxAdvertiser AckWhenBuffered) - : [ (peeraddr, TxAdvertiser AckWhenResolved) - | peeraddr <- resolvedAdvertisers - ] + advertisers = Set.fromList (ownerPeer : resolvedAdvertisers) mkEntry txKey = TxEntry { txLease = TxLeased ownerPeer (addTime 10 now) From eb6560a420b0a2145502a7a66294db33aec0a1c4 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Sat, 11 Apr 2026 18:06:54 +0200 Subject: [PATCH 16/67] fixup: remove expensive shared TX advertiser tracking Remove the tracking of advertisers in TxEntry. This change also changing how scoring is used to rank peers. A peers score affects how long time after the TX owners lease expire they can wake up and attempt to claim it. This means that acknowledgement/downloading requires minimal coordination between peers. 
--- .../TxSubmission/Inbound/V2/Registry.hs | 93 +-- .../Network/TxSubmission/Inbound/V2/State.hs | 727 +++++++++++------- .../Network/TxSubmission/Inbound/V2/Types.hs | 27 +- .../Ouroboros/Network/OrphanInstances.hs | 2 +- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 577 +++++++++----- .../Ouroboros/Network/Tracing/TxSubmission.hs | 2 +- 6 files changed, 907 insertions(+), 521 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index 4a813b23f6c..f8ae48c5d2c 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -175,7 +175,9 @@ withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar , addCounters = \delta -> atomically $ modifyTVar countersVar (<> delta) } ) - (\_ -> atomically $ modifyTVar sharedStateVar unregisterPeer) + (\_ -> do + now <- getMonotonicTime + atomically $ modifyTVar sharedStateVar (unregisterPeer now)) io where registerPeer :: Time -> SharedTxState peeraddr txid -> SharedTxState peeraddr txid @@ -188,11 +190,12 @@ withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar sharedPeerState = SharedPeerState { sharedPeerPhase = PeerIdle, sharedPeerScore = emptyPeerScore now, + sharedPeerAdvertisedTxKeys = IntSet.empty, sharedPeerGeneration = 0 } - unregisterPeer :: SharedTxState peeraddr txid -> SharedTxState peeraddr txid - unregisterPeer st@SharedTxState { sharedPeers, sharedTxTable, sharedRetainedTxs, sharedTxIdToKey, sharedKeyToTxId, sharedGeneration } = + unregisterPeer :: Time -> SharedTxState peeraddr txid -> SharedTxState peeraddr txid + unregisterPeer now st@SharedTxState { sharedPeers, sharedTxTable, sharedRetainedTxs, sharedTxIdToKey, sharedKeyToTxId, sharedGeneration } = bumpIdlePeerGenerations peersToWake $ st { sharedPeers = sharedPeers', 
sharedTxTable = sharedTxTable', @@ -202,49 +205,53 @@ withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar sharedGeneration = sharedGeneration + 1 } where + leavingAdvertisedKeys = + maybe IntSet.empty sharedPeerAdvertisedTxKeys (Map.lookup peeraddr sharedPeers) sharedPeers' = Map.delete peeraddr sharedPeers - (sharedTxTable', peersToWake) = - IntMap.foldlWithKey' scrubOne (IntMap.empty, Set.empty) sharedTxTable + scanState = + st { sharedPeers = sharedPeers' } + + (sharedTxTable', wakeKeys) = + IntSet.foldl' scrubOne (sharedTxTable, IntSet.empty) leavingAdvertisedKeys + peersToWake = + State.advertisingPeersForTxKeysExcept peeraddr wakeKeys scanState liveKeys = IntMap.keysSet sharedTxTable' `IntSet.union` retainedKeysSet sharedRetainedTxs - scrubOne (txTableAcc, wakeAcc) k txEntry = - let touched = txTouchesPeer txEntry - txEntry' = scrubTxEntry txEntry - in if txLive txEntry' - then ( IntMap.insert k txEntry' txTableAcc - , if touched - then Set.union wakeAcc (Set.delete peeraddr (txAdvertisers txEntry')) - else wakeAcc - ) - else (txTableAcc, wakeAcc) - - scrubTxEntry txEntry@TxEntry { txLease, txAdvertisers, txAttempts } = + scrubOne (txTableAcc, wakeKeysAcc) k = + case IntMap.lookup k txTableAcc of + Just txEntry -> + let txEntry' = scrubTxEntry txEntry + txTableAcc' = + if txLive txEntry' + then IntMap.insert k txEntry' txTableAcc + else IntMap.delete k txTableAcc + wakeKeysAcc' = + if txLive txEntry' + then IntSet.insert k wakeKeysAcc + else wakeKeysAcc + in (txTableAcc', wakeKeysAcc') + Nothing -> + (txTableAcc, wakeKeysAcc) + + scrubTxEntry txEntry@TxEntry { txLease, txAdvertiserCount, txAttempts } = txEntry { txLease = scrubLease txLease, - txAdvertisers = Set.delete peeraddr txAdvertisers, + txAdvertiserCount = txAdvertiserCount - 1, txAttempts = Map.delete peeraddr txAttempts } scrubLease (TxLeased owner leaseUntil) - | owner == peeraddr = TxClaimable + | owner == peeraddr = TxClaimable now | otherwise = TxLeased owner 
leaseUntil - scrubLease TxClaimable = TxClaimable - - txTouchesPeer TxEntry { txLease, txAdvertisers, txAttempts } = - leaseOwnedByPeer txLease - || Set.member peeraddr txAdvertisers - || Map.member peeraddr txAttempts + scrubLease claimable@TxClaimable {} = claimable - txLive TxEntry { txLease, txAdvertisers, txAttempts } = + txLive TxEntry { txLease, txAdvertiserCount, txAttempts } = leaseLive txLease - || not (Set.null txAdvertisers) + || txAdvertiserCount > 0 || not (Map.null txAttempts) - leaseOwnedByPeer (TxLeased owner _) = owner == peeraddr - leaseOwnedByPeer TxClaimable = False - - leaseLive TxClaimable = False + leaseLive TxClaimable {} = False leaseLive (TxLeased _ _) = True -- | Wait until either the peer's generation changes from the given @@ -511,10 +518,9 @@ resolveBufferedTxsImp sharedStateVar peerState txKeys = atomically $ do -- same peer thread to sleep on a stale generation. This makes its next -- 'awaitSharedChange' return immediately and re-run scheduling as an idle -- claimant. --- * When a peer leaves 'PeerIdle', bump the generations of other advertisers --- for txs advertised by that peer. Claim-owner selection only considers idle --- peers, so removing one idle advertiser can change which remaining idle --- peer should wake and claim a tx. +-- * When a peer becomes 'PeerIdle', bump that peer's own generation so it +-- immediately re-runs scheduling against any txs whose score-derived claim +-- delay may already have elapsed. 
updatePeerPhase :: Ord peeraddr => Time @@ -553,25 +559,8 @@ updatePeerPhase now policy peeraddr peerPhaseNew phaseWakePeers peerPhaseOld | peerPhaseOld /= PeerIdle , peerPhaseNew == PeerIdle = Set.singleton peeraddr - | peerPhaseOld == PeerIdle - , peerPhaseNew /= PeerIdle = advertisersForPeerTxsExcept peeraddr st | otherwise = Set.empty - -advertisersForPeerTxsExcept - :: Ord peeraddr - => peeraddr - -> SharedTxState peeraddr txid - -> Set.Set peeraddr -advertisersForPeerTxsExcept peeraddr SharedTxState { sharedTxTable } = - IntMap.foldl' collect Set.empty sharedTxTable - where - collect peers TxEntry { txAdvertisers } - | Set.member peeraddr txAdvertisers = - Set.union peers (Set.delete peeraddr txAdvertisers) - | otherwise = - peers - peerPhaseForActionIdle :: PeerAction -> PeerPhase peerPhaseForActionIdle peerAction = case peerAction of diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 1518f66dd77..a5bd9e1689a 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -9,14 +9,15 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.State , markSubmittingTxs , nextPeerAction , nextPeerActionPipelined + , advertisingPeersForTxKeysExcept + , advertisingPeersForTxExcept + , removeAdvertisingPeersForResolvedTx ) where -import Control.Exception (assert) import Control.Monad.Class.MonadTime.SI (DiffTime, Time, addTime, diffTime) import Data.Foldable (foldl', toList) import Data.IntMap.Strict qualified as IntMap import Data.IntSet qualified as IntSet -import Data.List (sort) import Data.Map.Strict qualified as Map import Data.Sequence.Strict qualified as StrictSeq import Data.Set qualified as Set @@ -43,10 +44,10 @@ data PeerActionContext peeraddr txid tx = PeerActionContext { pacPeerState :: !(PeerTxLocalState tx), -- | Shared tx-submission state 
after shared pruning has been applied. pacSharedState :: !(SharedTxState peeraddr txid), - -- | Decayed scores for peers that are currently idle and eligible to claim work. - -- Note that this field is lazy to avoid calculating the idle scores when it - -- isn't needed. - pacIdlePeerScores :: (Map.Map peeraddr Double) + -- | This peer's shared state after pruning. + pacSharedPeerState :: !SharedPeerState, + -- | Score-derived delay this peer must wait after a tx becomes claimable. + pacClaimDelay :: !DiffTime } data PeerActionChoice peeraddr = @@ -58,7 +59,7 @@ data PeerActionChoice peeraddr = -- | Build a precomputed context for selecting the next action for a peer. -- -- -mkPeerActionContext :: Ord txid +mkPeerActionContext :: (Ord peeraddr, Ord txid) => Time -> TxDecisionPolicy -> peeraddr @@ -72,7 +73,8 @@ mkPeerActionContext now policy peeraddr peerState sharedState = pacPeerAddr = peeraddr, pacPeerState = peerState', pacSharedState = sharedState', - pacIdlePeerScores = idlePeerScores + pacSharedPeerState = sharedPeerState', + pacClaimDelay = peerClaimDelay policy now (sharedPeerScore sharedPeerState') } where -- Remove expireds TX keys from the shared state @@ -91,12 +93,11 @@ mkPeerActionContext now policy peeraddr peerState sharedState = peerDownloadedTxs = IntMap.restrictKeys (peerDownloadedTxs peerState) (IntMap.keysSet (sharedTxTable sharedState')) } - idlePeerScores = - Map.mapMaybe toIdleScore (sharedPeers sharedState') - where - toIdleScore SharedPeerState { sharedPeerPhase, sharedPeerScore } - | sharedPeerPhase == PeerIdle = Just (currentPeerScore policy now sharedPeerScore) - | otherwise = Nothing + sharedPeerState' = + case Map.lookup peeraddr (sharedPeers sharedState') of + Just sharedPeerState -> sharedPeerState + Nothing -> + error "TxSubmission.V2.mkPeerActionContext: missing peer" -- | Compute the next peer-local action. 
nextPeerAction :: (Ord peeraddr, Ord txid) @@ -198,6 +199,8 @@ applyRequestTxsChoice ctx txsToRequest txsToRequestSize txTable = , sharedState'' ) where + requestedKeys = IntSet.fromList (unTxKey <$> txsToRequest) + peerState'' = (pacPeerState ctx) { peerRequestedTxs = @@ -211,7 +214,7 @@ applyRequestTxsChoice ctx txsToRequest txsToRequestSize txTable = } sharedState'' = bumpIdlePeerGenerations - (advertisersForKeysExcept (pacPeerAddr ctx) txTable txsToRequest) + (advertisingPeersForTxKeysExcept (pacPeerAddr ctx) requestedKeys (pacSharedState ctx)) ((pacSharedState ctx) { sharedTxTable = txTable, sharedGeneration = sharedGeneration (pacSharedState ctx) + 1 @@ -333,7 +336,7 @@ pickRequestTxsAction ctx@PeerActionContext { pacNow, pacPolicy, pacPeerState, pa else case IntMap.lookup k txTable of Just txEntry -> - if txSelectable ctx txEntry + if txSelectable ctx (TxKey k) txEntry then go (TxKey k : selectedRev) (selectedSize + txSize) @@ -396,26 +399,25 @@ pickRequestTxIdsAction txIdRequestMode ctx@PeerActionContext { pacPolicy, pacPee (fromIntegral (maxNumTxIdsToRequest pacPolicy) - numOfRequested) -- | Compute the time delay until the peer should next wake to check for work. 
-nextWakeDelay :: Ord peeraddr - => PeerActionContext peeraddr txid tx - -> Maybe DiffTime -nextWakeDelay PeerActionContext { pacNow, pacPeerAddr, pacSharedState } = +nextWakeDelay :: PeerActionContext peeraddr txid tx -> Maybe DiffTime +nextWakeDelay PeerActionContext { pacNow, pacClaimDelay, pacSharedPeerState, pacSharedState } = (`diffTime` pacNow) <$> minMaybe nextLeaseWake nextRetainWake where nextLeaseWake = - IntMap.foldl' stepLease Nothing (sharedTxTable pacSharedState) + IntSet.foldl' stepLease Nothing (sharedPeerAdvertisedTxKeys pacSharedPeerState) - stepLease acc txEntry@TxEntry { txLease } = - if Set.member pacPeerAddr (txAdvertisers txEntry) - then minMaybe acc (futureLeaseWake txLease) - else acc + stepLease acc k = + case IntMap.lookup k (sharedTxTable pacSharedState) of + Just txEntry -> + minMaybe acc (futureClaimWake txEntry) + Nothing -> + acc nextRetainWake = retainedNextWake pacNow (sharedRetainedTxs pacSharedState) - futureLeaseWake TxClaimable = Nothing - futureLeaseWake (TxLeased _ leaseUntil) - | leaseUntil > pacNow = Just leaseUntil - | otherwise = Nothing + futureClaimWake txEntry = + let claimWake = txClaimReadyAt pacClaimDelay txEntry + in if claimWake > pacNow then Just claimWake else Nothing minMaybe :: Ord a => Maybe a -> Maybe a -> Maybe a minMaybe Nothing y = y @@ -437,50 +439,29 @@ claimTx peeraddr leaseUntil txEntry@TxEntry { txAttempts } = -- | Determine if a tx is eligible for this peer to request. -- -- A tx is selectable if it can be claimed or is already owned by this peer --- and still being advertised. +-- and this peer's score-derived claim delay has elapsed. 
txSelectable :: Ord peeraddr => PeerActionContext peeraddr txid tx + -> TxKey -> TxEntry peeraddr -> Bool -txSelectable PeerActionContext { pacNow, pacPeerAddr, pacPolicy, pacIdlePeerScores } - txEntry@TxEntry { txAdvertisers, txTieBreakSalt } +txSelectable PeerActionContext { pacNow, pacPeerAddr, pacPolicy, pacSharedPeerState + , pacClaimDelay } + txKey + txEntry | txSubmittingAnywhere txEntry = False | txPeerHasAttempt = False | txActiveAttemptCount txEntry >= txInflightMultiplicity pacPolicy = False - | txOwnedByPeer txEntry && Set.member pacPeerAddr txAdvertisers = True - | otherwise = - let peerMayClaim = Set.member pacPeerAddr txAdvertisers && - (case pickClaimOwner of - Just owner -> owner == pacPeerAddr - Nothing -> False) in - case txLease txEntry of - TxClaimable -> peerMayClaim - TxLeased _ leaseExpiry -> (leaseExpiry <= pacNow) && peerMayClaim + | not peerAdvertisesTx = False + | txOwnedByPeer txEntry = True + | otherwise = txClaimReadyAt pacClaimDelay txEntry <= pacNow where - - -- Select which idle advertiser should claim a tx lease based on - -- peer score. - pickClaimOwner = - case eligiblePeers of - [] -> Nothing - _ -> Just (pickBestPeer eligiblePeers) - where - eligiblePeers = - [ (candidate, score) - | candidate <- Set.toList txAdvertisers - , Just score <- [Map.lookup candidate pacIdlePeerScores] - ] - - pickBestPeer peers = - case sort [ candidate | (candidate, score) <- peers, score == bestScore ] of - [] -> assert False pacPeerAddr - tied -> tied !! 
(txTieBreakSalt `mod` length tied) - where - bestScore = minimum [ score | (_, score) <- peers ] + peerAdvertisesTx = + IntSet.member (unTxKey txKey) (sharedPeerAdvertisedTxKeys pacSharedPeerState) -- txOwnedByPeer :: TxEntry peeraddr -> Bool txOwnedByPeer TxEntry { txLease = TxLeased owner _ } = owner == pacPeerAddr - txOwnedByPeer TxEntry { txLease = TxClaimable } = False + txOwnedByPeer TxEntry { txLease = TxClaimable _ } = False txPeerHasAttempt = case txAttemptOfPeer pacPeerAddr txEntry of @@ -539,6 +520,49 @@ currentPeerScore TxDecisionPolicy { scoreRate } currentTime | currentTime == peerScoreTs = peerScoreValue | otherwise = max 0 $ peerScoreValue - realToFrac (diffTime currentTime peerScoreTs) * scoreRate +peerClaimDelay :: TxDecisionPolicy + -> Time + -> PeerScore + -> DiffTime +peerClaimDelay policy currentTime = + -- Delay contribution in milliseconds is peerScore / 20, then converted to seconds. + realToFrac . (/ 20000) . currentPeerScore policy currentTime + +txClaimReadyAt :: DiffTime -> TxEntry peeraddr -> Time +txClaimReadyAt claimDelay TxEntry { txLease } = + addTime claimDelay claimableAt + where + claimableAt = + case txLease of + TxLeased _ leaseUntil -> leaseUntil + TxClaimable readyAt -> readyAt + +updatePeerAdvertisedTxKeys + :: Ord peeraddr + => peeraddr + -> (IntSet.IntSet -> (a, IntSet.IntSet)) + -> SharedTxState peeraddr txid + -> (a, SharedTxState peeraddr txid) +updatePeerAdvertisedTxKeys peeraddr updateKeys st@SharedTxState { sharedPeers } = + case Map.lookup peeraddr sharedPeers of + Just sharedPeerState -> + let oldKeys = sharedPeerAdvertisedTxKeys sharedPeerState + (result, newKeys) = updateKeys oldKeys + in if newKeys == oldKeys + then (result, st) + else + ( result + , st { + sharedPeers = + Map.insert + peeraddr + (sharedPeerState { sharedPeerAdvertisedTxKeys = newKeys }) + sharedPeers + } + ) + Nothing -> + error "TxSubmission.V2.updatePeerAdvertisedTxKeys: missing peer" + -- | Acknowledge txids from a peer and update shared 
state. acknowledgeTxIds :: (Ord peeraddr, Ord txid) => peeraddr @@ -547,26 +571,29 @@ acknowledgeTxIds :: (Ord peeraddr, Ord txid) -> SharedTxState peeraddr txid acknowledgeTxIds _ [] st = st acknowledgeTxIds peeraddr acknowledgedTxIds st = - case foldl' acknowledgeOne (False, st) acknowledgedTxIds of - (False, _) -> st - (True, st') -> st' { sharedGeneration = sharedGeneration st + 1 } + case IntSet.null removedKeys of + True -> st + False -> + let st'' = IntSet.foldl' acknowledgeOne st' removedKeys + in st'' { sharedGeneration = sharedGeneration st + 1 } where - acknowledgeOne acc0@(_, acc) (TxKey k) = + acknowledgedKeys = IntSet.fromList (unTxKey <$> acknowledgedTxIds) + (removedKeys, st') = + updatePeerAdvertisedTxKeys peeraddr removeAdvertisedKeys st + + removeAdvertisedKeys advertisedKeys = + let removed = IntSet.intersection acknowledgedKeys advertisedKeys + in (removed, advertisedKeys `IntSet.difference` removed) + + acknowledgeOne acc k = case IntMap.lookup k (sharedTxTable acc) of - Just txEntry@TxEntry { txAdvertisers } -> - if Set.member peeraddr txAdvertisers - then - let txAdvertisers' = Set.delete peeraddr txAdvertisers - txEntry' = txEntry { txAdvertisers = txAdvertisers' } - !acc' = - if activeTxLive txEntry' - then acc { sharedTxTable = IntMap.insert k txEntry' (sharedTxTable acc) } - else dropTxKey k acc - in (True, acc') - else - acc0 - Nothing -> - acc0 + Just txEntry -> + let txEntry' = txEntry { txAdvertiserCount = txAdvertiserCount txEntry - 1 } + in if activeTxLive txEntry' + then acc { sharedTxTable = IntMap.insert k txEntry' (sharedTxTable acc) } + else dropTxKey k acc + Nothing -> + acc -- | Determine if an unacknowledged txid is ready to be acknowledged. 
-- @@ -578,17 +605,18 @@ txIdAckable :: Ord peeraddr => PeerActionContext peeraddr txid tx -> TxKey -> Bool -txIdAckable PeerActionContext { pacPeerAddr, pacPeerState, pacSharedState } (TxKey k) +txIdAckable PeerActionContext { pacPeerAddr, pacPeerState, pacSharedPeerState, pacSharedState } + (TxKey k) | retainedMember k (sharedRetainedTxs pacSharedState) = True | otherwise = case IntMap.lookup k (sharedTxTable pacSharedState) of - Just txEntry@TxEntry { txLease, txAdvertisers, txAttempts } -> - if Set.member pacPeerAddr txAdvertisers + Just txEntry@TxEntry { txLease, txAttempts } -> + if IntSet.member k (sharedPeerAdvertisedTxKeys pacSharedPeerState) then let ackWhenBuffered = case txLease of TxLeased owner _ -> owner == pacPeerAddr || Map.member pacPeerAddr txAttempts - TxClaimable -> Map.member pacPeerAddr txAttempts + TxClaimable _ -> Map.member pacPeerAddr txAttempts in if ackWhenBuffered then @@ -663,39 +691,117 @@ dropDeadActiveKeys keys st@SharedTxState { sharedTxTable } = -- A TX entry is alive if there is a lease, there are advertisers for it or there are -- download attempts for it. activeTxLive :: TxEntry peeraddr -> Bool -activeTxLive TxEntry { txLease, txAdvertisers, txAttempts } = +activeTxLive TxEntry { txLease, txAdvertiserCount, txAttempts } = leaseLive txLease - || not (Set.null txAdvertisers) + || txAdvertiserCount > 0 || not (Map.null txAttempts) where - leaseLive TxClaimable = False + leaseLive TxClaimable {} = False leaseLive TxLeased {} = True --- | Collect the advertisers of the given active tx keys, excluding one peer. 
-advertisersForKeysExcept +peerAdvertisesTxKey :: Int -> SharedPeerState -> Bool +peerAdvertisesTxKey k SharedPeerState { sharedPeerAdvertisedTxKeys } = + IntSet.member k sharedPeerAdvertisedTxKeys + +peerAdvertisesAnyTxKey + :: IntSet.IntSet + -> SharedPeerState + -> Bool +peerAdvertisesAnyTxKey targetKeys sharedPeerState = + IntSet.foldr (\k acc -> peerAdvertisesTxKey k sharedPeerState || acc) False targetKeys + +advertisingPeersForTxKeysExcept :: Ord peeraddr => peeraddr - -> IntMap.IntMap (TxEntry peeraddr) - -> [TxKey] + -> IntSet.IntSet + -> SharedTxState peeraddr txid -> Set.Set peeraddr -advertisersForKeysExcept currentPeer txTable = - foldl' collectAdvertisers Set.empty +advertisingPeersForTxKeysExcept _ targetKeys _ + | IntSet.null targetKeys = Set.empty +advertisingPeersForTxKeysExcept currentPeer targetKeys SharedTxState { sharedPeers } = + Map.foldlWithKey' collect Set.empty sharedPeers where - collectAdvertisers peers (TxKey k) = - case IntMap.lookup k txTable of - Just txEntry -> - Set.union peers (advertisersForEntryExcept currentPeer txEntry) - Nothing -> - peers - --- | Get all advertisers for a transaction entry, excluding a specific peer. 
-advertisersForEntryExcept :: Ord peeraddr - => peeraddr - -> TxEntry peeraddr - -> Set.Set peeraddr -advertisersForEntryExcept currentPeer TxEntry { txAdvertisers } = - Set.delete currentPeer txAdvertisers + collect acc peeraddr sharedPeerState + | peeraddr == currentPeer = acc + | peerAdvertisesAnyTxKey targetKeys sharedPeerState = Set.insert peeraddr acc + | otherwise = acc + +advertisingPeersForTx + :: Ord peeraddr + => TxKey + -> Map.Map peeraddr SharedPeerState + -> Set.Set peeraddr +advertisingPeersForTx (TxKey k) = + Map.foldlWithKey' collect Set.empty + where + collect acc peeraddr sharedPeerState + | peerAdvertisesTxKey k sharedPeerState = Set.insert peeraddr acc + | otherwise = acc + +advertisingPeersForTxExcept + :: Ord peeraddr + => peeraddr + -> TxKey + -> SharedTxState peeraddr txid + -> Set.Set peeraddr +advertisingPeersForTxExcept currentPeer txKey = + advertisingPeersForTxKeysExcept currentPeer (IntSet.singleton (unTxKey txKey)) + +removeAdvertisingPeersForResolvedTx + :: Ord peeraddr + => TxKey + -> SharedTxState peeraddr txid + -> (SharedTxState peeraddr txid, Set.Set peeraddr) +removeAdvertisingPeersForResolvedTx txKey@(TxKey k) st@SharedTxState { sharedPeers } + | Set.null advertisers = (st, advertisers) + | otherwise = + ( st { + sharedPeers = + Set.foldl' clearAdvertisedKey sharedPeers advertisers + } + , advertisers + ) + where + advertisers = advertisingPeersForTx txKey sharedPeers + + clearAdvertisedKey peers peeraddr = + Map.adjust + (\sharedPeerState -> + sharedPeerState { + sharedPeerAdvertisedTxKeys = + IntSet.delete k (sharedPeerAdvertisedTxKeys sharedPeerState) + }) + peeraddr + peers + +removeAdvertisingPeersForResolvedTxExcept + :: Ord peeraddr + => peeraddr + -> TxKey + -> SharedTxState peeraddr txid + -> (SharedTxState peeraddr txid, Set.Set peeraddr) +removeAdvertisingPeersForResolvedTxExcept currentPeer txKey@(TxKey k) st@SharedTxState { sharedPeers } + | Set.null advertisers = (st, advertisers) + | otherwise = + ( st { + 
sharedPeers = + Set.foldl' clearAdvertisedKey sharedPeers advertisers + } + , advertisers + ) + where + advertisers = advertisingPeersForTxExcept currentPeer txKey st + + clearAdvertisedKey peers peeraddr = + Map.adjust + (\sharedPeerState -> + sharedPeerState { + sharedPeerAdvertisedTxKeys = + IntSet.delete k (sharedPeerAdvertisedTxKeys sharedPeerState) + }) + peeraddr + peers -- | Handle a batch of tx bodies received from one peer. @@ -730,11 +836,10 @@ handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState = (batch, batches) -- Process each received tx, collecting late counts, pending requests, - -- updated tables, and peers to wake up. + -- updated shared state, and peers to wake up. ( lateCount , pendingRequestedKeys - , txTableHandled - , retainedHandled + , sharedStateHandled , receivedWakePeers , peerDownloadedTxs' ) = @@ -742,26 +847,27 @@ handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState = handleOne ( 0 , requestedKeys - , sharedTxTable sharedState - , sharedRetainedTxs sharedState + , sharedState , Set.empty , peerDownloadedTxs peerState ) txs + (omittedAdvertisedKeys, sharedStateReleased0) = + updatePeerAdvertisedTxKeys peeraddr removeOmittedAdvertisedKeys sharedStateHandled + -- Process omitted (not received) txs: count a penalty for every omitted -- request, release ownership for keys that are still live, and collect -- peers to wake up. - (omittedCount, txTableReleased, omittedWakePeers) = - IntSet.foldl' handleOmitted (0, txTableHandled, Set.empty) pendingRequestedKeys + (omittedCount, sharedStateReleased, omittedWakePeers) = + IntSet.foldl' handleOmitted (0, sharedStateReleased0, Set.empty) pendingRequestedKeys -- Build the final shared state with updated tables and cleaned-up dead entries. 
sharedState'' = - dropDeadActiveKeys pendingRequestedKeys sharedState { - sharedTxTable = txTableReleased, - sharedRetainedTxs = retainedHandled, - sharedGeneration = sharedGeneration sharedState + 1 - } + dropDeadActiveKeys pendingRequestedKeys $ + sharedStateReleased { + sharedGeneration = sharedGeneration sharedState + 1 + } -- Update peer state: remove processed keys, update batch tracking, and record -- downloaded txs. @@ -784,14 +890,16 @@ handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState = IntMap.member k (sharedTxTable sharedState) || retainedMember k (sharedRetainedTxs sharedState) + removeOmittedAdvertisedKeys advertisedKeys = + let removed = IntSet.intersection pendingRequestedKeys advertisedKeys + in (removed, advertisedKeys `IntSet.difference` removed) -- Fold function over received txs: classify as late, already in mempool, or buffer for -- download. handleOne ( lateCountAcc , pendingKeysAcc - , txTableAcc - , retainedAcc + , sharedAcc , wakePeersAcc , downloadedAcc ) @@ -800,69 +908,90 @@ handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState = Nothing -> ( lateCountAcc + 1 , pendingKeysAcc - , txTableAcc - , retainedAcc + , sharedAcc , wakePeersAcc , downloadedAcc ) - Just (TxKey k) - | retainedMember k retainedAcc -> - ( lateCountAcc + 1 - , IntSet.delete k pendingKeysAcc - , IntMap.delete k txTableAcc - , retainedAcc - , wakePeersAcc - , downloadedAcc - ) + Just txKey@(TxKey k) + | retainedMember k (sharedRetainedTxs sharedAcc) -> + let sharedAcc' = + sharedAcc { + sharedTxTable = IntMap.delete k (sharedTxTable sharedAcc) + } + in ( lateCountAcc + 1 + , IntSet.delete k pendingKeysAcc + , sharedAcc' + , wakePeersAcc + , downloadedAcc + ) | mempoolHasTx txid -> - let wakePeers = - case IntMap.lookup k txTableAcc of - Just txEntry -> - Set.union (advertisersForEntryExcept peeraddr txEntry) - wakePeersAcc - Nothing -> - wakePeersAcc in - ( lateCountAcc + 1 - , IntSet.delete k pendingKeysAcc - , 
IntMap.delete k txTableAcc - , retainedInsertMax k retainUntil retainedAcc - , wakePeers - , downloadedAcc - ) + let (sharedAcc', advertisers) = + case IntMap.lookup k (sharedTxTable sharedAcc) of + Just _ -> + removeAdvertisingPeersForResolvedTx txKey sharedAcc + Nothing -> + (sharedAcc, Set.empty) + wakePeers = + Set.union wakePeersAcc (Set.delete peeraddr advertisers) + sharedAcc'' = + sharedAcc' { + sharedTxTable = IntMap.delete k (sharedTxTable sharedAcc'), + sharedRetainedTxs = + retainedInsertMax k retainUntil (sharedRetainedTxs sharedAcc') + } + in ( lateCountAcc + 1 + , IntSet.delete k pendingKeysAcc + , sharedAcc'' + , wakePeers + , downloadedAcc + ) | otherwise -> - case IntMap.lookup k txTableAcc of + case IntMap.lookup k (sharedTxTable sharedAcc) of Just txEntry | peerHasAttempt txEntry -> ( lateCountAcc , IntSet.delete k pendingKeysAcc - , IntMap.insert k (markBuffered txEntry) txTableAcc - , retainedAcc + , sharedAcc { + sharedTxTable = + IntMap.insert k (markBuffered txEntry) + (sharedTxTable sharedAcc) + } , wakePeersAcc , IntMap.insert k tx downloadedAcc ) _ -> ( lateCountAcc + 1 , IntSet.delete k pendingKeysAcc - , txTableAcc - , retainedAcc + , sharedAcc , wakePeersAcc , downloadedAcc ) -- Handle omitted (not received) txs: release ownership, count penalties, -- and wake up other advertisers if the tx is still active. 
- handleOmitted (omittedCountAcc, txTableAcc, wakePeersAcc) k + handleOmitted (omittedCountAcc, sharedAcc, wakePeersAcc) k | keyWasLive k = - let txTableAcc' = releaseOne txTableAcc k + let sharedAcc' = + case IntMap.lookup k (sharedTxTable sharedAcc) of + Just txEntry -> + sharedAcc { + sharedTxTable = + IntMap.insert k (releaseLease (IntSet.member k omittedAdvertisedKeys) txEntry) + (sharedTxTable sharedAcc) + } + Nothing -> + sharedAcc wakePeersAcc' = - case IntMap.lookup k txTableAcc' of + case IntMap.lookup k (sharedTxTable sharedAcc') of Just txEntry | activeTxLive txEntry -> - Set.union (advertisersForEntryExcept peeraddr txEntry) wakePeersAcc + Set.union + (advertisingPeersForTxExcept peeraddr (TxKey k) sharedAcc') + wakePeersAcc _ -> wakePeersAcc in - (omittedCountAcc + 1, txTableAcc', wakePeersAcc') + (omittedCountAcc + 1, sharedAcc', wakePeersAcc') | otherwise = - (omittedCountAcc + 1, txTableAcc, wakePeersAcc) + (omittedCountAcc + 1, sharedAcc, wakePeersAcc) peerHasAttempt TxEntry { txAttempts } = Map.member peeraddr txAttempts @@ -870,15 +999,15 @@ handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState = markBuffered txEntry@TxEntry { txAttempts } = txEntry { txAttempts = Map.insert peeraddr TxBuffered txAttempts } - releaseOne txTable k = - IntMap.adjust releaseLease k txTable - - releaseLease txEntry@TxEntry { txLease, txAdvertisers, txAttempts } = + releaseLease wasAdvertised txEntry@TxEntry { txLease, txAdvertiserCount, txAttempts } = txEntry { txLease = case txLease of - TxLeased owner _ | owner == peeraddr -> TxClaimable + TxLeased owner _ | owner == peeraddr -> TxClaimable now _ -> txLease, - txAdvertisers = Set.delete peeraddr txAdvertisers, + txAdvertiserCount = + if wasAdvertised + then txAdvertiserCount - 1 + else txAdvertiserCount, txAttempts = Map.delete peeraddr txAttempts } @@ -910,44 +1039,79 @@ handleSubmittedTxs now policy peeraddr acceptedTxs rejectedTxs peerState sharedS IntSet.foldl' (flip IntMap.delete) 
(peerDownloadedTxs peerState) submittedKeys } - (acceptedAdvertisers, activeTableAfterAccepted, retainedTxs') = acceptSubmittedTxs + (sharedStateAfterAccepted, acceptedAdvertisers) = + foldl' acceptSubmittedTx (sharedState, Set.empty) acceptedTxs - rejectedActive = IntSet.foldl' updateRejected activeTableAfterAccepted rejectedKeys + (rejectedAdvertisedKeys, sharedStateAfterRejectedPeer) = + updatePeerAdvertisedTxKeys peeraddr removeRejectedAdvertisedKeys sharedStateAfterAccepted + + (sharedStateAfterRejected, rejectedWakePeers) = + IntSet.foldl' updateRejected (sharedStateAfterRejectedPeer, Set.empty) rejectedKeys sharedState' = bumpIdlePeerGenerations - (Set.union acceptedAdvertisers (advertisersForKeysExcept peeraddr txTable' (fmap TxKey (IntSet.toList rejectedKeys)))) + (Set.union acceptedAdvertisers rejectedWakePeers) sharedState'' sharedState'' = - dropDeadActiveKeys rejectedKeys sharedState { - sharedTxTable = rejectedActive, - sharedRetainedTxs = retainedTxs', - sharedGeneration = sharedGeneration sharedState + 1 - } - - txTable' = sharedTxTable sharedState'' + dropDeadActiveKeys rejectedKeys $ + sharedStateAfterRejected { + sharedGeneration = sharedGeneration sharedState + 1 + } retainedUntil = addTime (bufferedTxsMinLifetime policy) now - acceptSubmittedTxs = - ( advertisersForKeysExcept peeraddr (sharedTxTable sharedState) acceptedTxs - , IntMap.withoutKeys (sharedTxTable sharedState) acceptedKeys - , IntSet.foldl' - (\retained k -> retainedInsertMax k retainedUntil retained) - (sharedRetainedTxs sharedState) - acceptedKeys - ) - - updateRejected txTable k = - IntMap.adjust markRejected k txTable - - markRejected txEntry@TxEntry { txLease, txAdvertisers, txAttempts } = + removeRejectedAdvertisedKeys advertisedKeys = + let removed = IntSet.intersection rejectedKeys advertisedKeys + in (removed, advertisedKeys `IntSet.difference` removed) + + acceptSubmittedTx (sharedAcc, wakePeersAcc) txKey@(TxKey k) = + let (sharedAcc', advertisers) = + case IntMap.lookup 
k (sharedTxTable sharedAcc) of + Just _ -> + removeAdvertisingPeersForResolvedTx txKey sharedAcc + Nothing -> + (sharedAcc, Set.empty) + sharedAcc'' = + sharedAcc' { + sharedTxTable = IntMap.delete k (sharedTxTable sharedAcc'), + sharedRetainedTxs = + retainedInsertMax k retainedUntil (sharedRetainedTxs sharedAcc') + } + in (sharedAcc'', Set.union wakePeersAcc (Set.delete peeraddr advertisers)) + + updateRejected (sharedAcc, wakePeersAcc) k = + let sharedAcc' = + case IntMap.lookup k (sharedTxTable sharedAcc) of + Just txEntry -> + sharedAcc { + sharedTxTable = + IntMap.insert k + (markRejected (IntSet.member k rejectedAdvertisedKeys) txEntry) + (sharedTxTable sharedAcc) + } + Nothing -> + sharedAcc + wakePeersAcc' = + case IntMap.lookup k (sharedTxTable sharedAcc') of + Just txEntry + | activeTxLive txEntry -> + Set.union + (advertisingPeersForTxExcept peeraddr (TxKey k) sharedAcc') + wakePeersAcc + _ -> + wakePeersAcc + in (sharedAcc', wakePeersAcc') + + markRejected wasAdvertised txEntry@TxEntry { txLease, txAdvertiserCount, txAttempts } = txEntry { txLease = case txLease of - TxLeased owner _ | owner == peeraddr -> TxClaimable + TxLeased owner _ | owner == peeraddr -> TxClaimable now _ -> txLease, - txAdvertisers = Set.delete peeraddr txAdvertisers, + txAdvertiserCount = + if wasAdvertised + then txAdvertiserCount - 1 + else txAdvertiserCount, txAttempts = Map.delete peeraddr txAttempts } @@ -997,33 +1161,37 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize peerState sharedState = (peerState'', sharedState'') where + sharedPeerState0 = + case Map.lookup peeraddr (sharedPeers sharedState) of + Just sharedPeerState -> sharedPeerState + Nothing -> + error "TxSubmission.V2.handleReceivedTxIds: missing peer" + + peerAdvertisedKeys0 = sharedPeerAdvertisedTxKeys sharedPeerState0 -- Fold over received txids: build unacknowledged list, update tables, -- and track peers to wake based on tx state (retained/mempool/new). 
- ( peerUnacknowledgedTxIds' + ( receivedTxKeysRev , peerAvailableTxIds' - , sharedTxIdToKey' - , sharedKeyToTxId' - , sharedNextTxKey' - , sharedTxTable' - , sharedRetainedTxs' + , sharedStateHandled + , peerAdvertisedKeys' , peersToWake , sharedChanged ) = foldl' step - ( peerUnacknowledgedTxIds peerState + ( [] , peerAvailableTxIds peerState - , sharedTxIdToKey sharedState - , sharedKeyToTxId sharedState - , sharedNextTxKey sharedState - , sharedTxTable sharedState - , sharedRetainedTxs sharedState + , sharedState + , peerAdvertisedKeys0 , Set.empty , False ) txidsAndSizes + peerUnacknowledgedTxIds' = + peerUnacknowledgedTxIds peerState <> StrictSeq.fromList (reverse receivedTxKeysRev) + peerState'' = peerState { peerUnacknowledgedTxIds = peerUnacknowledgedTxIds', peerRequestedTxIds = fromIntegral $ @@ -1033,14 +1201,17 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize } sharedState'' - | sharedChanged = + | sharedChanged || peerAdvertisedKeys' /= peerAdvertisedKeys0 = bumpIdlePeerGenerations peersToWake $ - sharedState { - sharedTxIdToKey = sharedTxIdToKey', - sharedKeyToTxId = sharedKeyToTxId', - sharedNextTxKey = sharedNextTxKey', - sharedTxTable = sharedTxTable', - sharedRetainedTxs = sharedRetainedTxs', + sharedStateHandled { + sharedPeers = + if peerAdvertisedKeys' == peerAdvertisedKeys0 + then sharedPeers sharedStateHandled + else + Map.insert + peeraddr + (sharedPeerState0 { sharedPeerAdvertisedTxKeys = peerAdvertisedKeys' }) + (sharedPeers sharedStateHandled), sharedGeneration = sharedGeneration sharedState + 1 } | otherwise = @@ -1050,114 +1221,118 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize -- Process each received txid: classify as retained, in mempool, or new entry. 
step - :: ( StrictSeq.StrictSeq TxKey + :: ( [TxKey] , IntMap.IntMap SizeInBytes - , Map.Map txid TxKey - , IntMap.IntMap txid - , Int - , IntMap.IntMap (TxEntry peeraddr) - , RetainedTxs + , SharedTxState peeraddr txid + , IntSet.IntSet , Set.Set peeraddr , Bool ) -> (txid, SizeInBytes) - -> ( StrictSeq.StrictSeq TxKey + -> ( [TxKey] , IntMap.IntMap SizeInBytes - , Map.Map txid TxKey - , IntMap.IntMap txid - , Int - , IntMap.IntMap (TxEntry peeraddr) - , RetainedTxs + , SharedTxState peeraddr txid + , IntSet.IntSet , Set.Set peeraddr , Bool ) step ( !unacknowledgedAcc , !availableAcc - , !txIdToKeyAcc - , !keyToTxIdAcc - , !nextTxKeyAcc - , !txTableAcc - , !retainedAcc + , !sharedAcc + , !peerAdvertisedKeysAcc , !peersAcc , !sharedChangedAcc ) (txid, txSize) | retainedMember k retainedAcc = - ( unacknowledgedAcc StrictSeq.|> txKey + ( txKey : unacknowledgedAcc , IntMap.delete k availableAcc - , txIdToKeyAcc' - , keyToTxIdAcc' - , nextTxKeyAcc' - , txTableAcc - , retainedAcc + , sharedAcc' + , IntSet.delete k peerAdvertisedKeysAcc , peersAcc - , sharedChangedAcc + , sharedChangedAcc' ) | mempoolHasTx txid = - let wakePeers = case IntMap.lookup k txTableAcc of - Just txEntry -> Set.union (advertisersForEntryExcept peeraddr txEntry) peersAcc - Nothing -> peersAcc - in ( unacknowledgedAcc StrictSeq.|> txKey + let (sharedAcc'', advertisers) = + case IntMap.lookup k (sharedTxTable sharedAcc') of + Just _ -> + removeAdvertisingPeersForResolvedTxExcept peeraddr txKey sharedAcc' + Nothing -> + (sharedAcc', Set.empty) + wakePeers = + Set.union peersAcc (Set.delete peeraddr advertisers) + in ( txKey : unacknowledgedAcc , IntMap.delete k availableAcc - , txIdToKeyAcc' - , keyToTxIdAcc' - , nextTxKeyAcc' - , IntMap.delete k txTableAcc - , retainedInsertMax k retainUntil retainedAcc + , sharedAcc'' { + sharedTxTable = IntMap.delete k (sharedTxTable sharedAcc''), + sharedRetainedTxs = + retainedInsertMax k retainUntil (sharedRetainedTxs sharedAcc'') + } + , IntSet.delete k 
peerAdvertisedKeysAcc , wakePeers , True ) | otherwise = - case IntMap.lookup k txTableAcc of + case IntMap.lookup k (sharedTxTable sharedAcc') of Nothing -> let txEntry = TxEntry { txLease = TxLeased peeraddr (addTime (interTxSpace policy) now), - txAdvertisers = Set.singleton peeraddr, - txTieBreakSalt = k, + txAdvertiserCount = 1, txAttempts = Map.empty } - in ( unacknowledgedAcc StrictSeq.|> txKey + in ( txKey : unacknowledgedAcc , IntMap.insert k txSize availableAcc - , txIdToKeyAcc' - , keyToTxIdAcc' - , nextTxKeyAcc' - , IntMap.insert k txEntry txTableAcc - , retainedAcc + , sharedAcc' { + sharedTxTable = IntMap.insert k txEntry (sharedTxTable sharedAcc') + } + , IntSet.insert k peerAdvertisedKeysAcc , peersAcc , True ) Just txEntry -> - let (entryChanged, txEntry') = addAdvertiser txEntry - in ( unacknowledgedAcc StrictSeq.|> txKey - , IntMap.insert k txSize availableAcc - , txIdToKeyAcc' - , keyToTxIdAcc' - , nextTxKeyAcc' - , IntMap.insert k txEntry' txTableAcc - , retainedAcc + let (entryChanged, txEntry', peerAdvertisedKeysAcc') = + addAdvertiser k peerAdvertisedKeysAcc txEntry + availableAcc' = IntMap.insert k txSize availableAcc + sharedAcc'' = + if entryChanged + then + sharedAcc' { + sharedTxTable = IntMap.insert k txEntry' (sharedTxTable sharedAcc') + } + else + sharedAcc' + in ( txKey : unacknowledgedAcc + , availableAcc' + , sharedAcc'' + , peerAdvertisedKeysAcc' , peersAcc - , sharedChangedAcc || entryChanged + , sharedChangedAcc' || entryChanged ) where - (txKey@(TxKey k), txIdToKeyAcc', keyToTxIdAcc', nextTxKeyAcc') = - case Map.lookup txid txIdToKeyAcc of - Just existingKey -> - (existingKey, txIdToKeyAcc, keyToTxIdAcc, nextTxKeyAcc) - Nothing -> - let newKey = TxKey nextTxKeyAcc - in ( newKey - , Map.insert txid newKey txIdToKeyAcc - , IntMap.insert nextTxKeyAcc txid keyToTxIdAcc - , nextTxKeyAcc + 1 - ) + retainedAcc = sharedRetainedTxs sharedAcc' + sharedChangedAcc' = sharedChangedAcc || txKeyWasNew + (txKey@(TxKey k), txKeyWasNew, 
sharedAcc') = lookupOrInternTxId txid sharedAcc - addAdvertiser txEntry@TxEntry { txAdvertisers } = - if Set.member peeraddr txAdvertisers + addAdvertiser k peerAdvertisedKeysAcc txEntry@TxEntry { txAdvertiserCount } = + if IntSet.member k peerAdvertisedKeysAcc then - -- Peer already an advertiser, avoid copying the map - (False, txEntry) + (False, txEntry, peerAdvertisedKeysAcc) else - -- New advertiser, insert and copy - let txAdvertisers' = Set.insert peeraddr txAdvertisers - in (True, txEntry { txAdvertisers = txAdvertisers' }) + ( True + , txEntry { txAdvertiserCount = txAdvertiserCount + 1 } + , IntSet.insert k peerAdvertisedKeysAcc + ) + + lookupOrInternTxId txid st@SharedTxState { sharedTxIdToKey, sharedKeyToTxId, sharedNextTxKey } + | Just key <- Map.lookup txid sharedTxIdToKey = (key, False, st) + | otherwise = + let key = TxKey sharedNextTxKey + in ( key + , True + , st { + sharedTxIdToKey = Map.insert txid key sharedTxIdToKey, + sharedKeyToTxId = IntMap.insert sharedNextTxKey txid sharedKeyToTxId, + sharedNextTxKey = sharedNextTxKey + 1 + } + ) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index 982381311f2..0205366e67e 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -152,9 +152,11 @@ data TxAttemptState -- | The current download lease for a tx body. -- -- A tx is either currently leased to a peer until a deadline or it is --- unowned and can be claimed by an eligible advertiser. +-- unowned and became claimable at a specific time. Peers use their decayed +-- score as an additional per-peer delay after that claimable time before +-- attempting to steal the lease. 
data TxLease peeraddr = TxLeased !peeraddr !Time - | TxClaimable + | TxClaimable !Time deriving stock (Eq, Show, Generic) deriving anyclass NFData @@ -168,11 +170,12 @@ data TxEntry peeraddr = TxEntry { -- | Current owner lease for downloading the tx body. txLease :: !(TxLease peeraddr), - -- | Peers that have advertised this tx. - txAdvertisers :: !(Set.Set peeraddr), - - -- | Stable salt used to break ties between equally scored advertisers. - txTieBreakSalt :: !Int, + -- | Number of peers that still advertise this tx. + -- + -- The actual advertiser membership is tracked per peer in + -- 'SharedPeerState'. The count is retained here so that hot-path scans can + -- stop once all advertisers have been found. + txAdvertiserCount :: !Int, -- | Current per-peer attempt state for this tx body. txAttempts :: !(Map peeraddr TxAttemptState) @@ -234,7 +237,8 @@ data PeerPhase -- | Peer usefulness score. -- --- Lower is better. +-- Lower is better. The current score is also interpreted as milliseconds of +-- extra delay before an idle peer may steal a claimable or expired tx lease. data PeerScore = PeerScore { peerScoreValue :: !Double, peerScoreTs :: !Time @@ -338,9 +342,10 @@ emptyPeerTxLocalState = PeerTxLocalState { -- | Small shared view of peer state used for lease claiming and peer -- selection. 
data SharedPeerState = SharedPeerState { - sharedPeerPhase :: !PeerPhase, - sharedPeerScore :: !PeerScore, - sharedPeerGeneration :: !Word64 + sharedPeerPhase :: !PeerPhase, + sharedPeerScore :: !PeerScore, + sharedPeerAdvertisedTxKeys :: !IntSet, + sharedPeerGeneration :: !Word64 } deriving stock (Eq, Show, Generic) deriving anyclass NFData diff --git a/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs b/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs index 876d50979df..358a74aae97 100644 --- a/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs +++ b/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs @@ -1830,7 +1830,7 @@ traceSharedTxStateToJSON SharedTxState { length [ () | TxEntry { txLease = TxLeased _ _ } <- activeEntries ] claimableTxCount = - length [ () | TxEntry { txLease = TxClaimable } <- activeEntries ] + length [ () | TxEntry { txLease = TxClaimable _ } <- activeEntries ] resolvedTxCount = 0 :: Int diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 8a9ac6abbeb..eafa405e8de 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -57,12 +57,14 @@ tests = [ testProperty "handleReceivedTxIds inserts new tx entries" prop_handleReceivedTxIds_newEntries , testProperty "handleReceivedTxIds resolves txids already in mempool" prop_handleReceivedTxIds_knownToMempool , testProperty "handleReceivedTxIds keeps retained txids local-only" prop_handleReceivedTxIds_retainedIsLocalOnly + , testCaseSteps "handleReceivedTxIds adds the current peer as an advertiser for active txs" unit_handleReceivedTxIds_addsAdvertiserForActiveTxs , testProperty "handleReceivedTxs buffers received and drops omitted txs" prop_handleReceivedTxs_buffersAndDropsOmitted , 
testProperty "handleReceivedTxs drops late bodies already retained or in mempool" prop_handleReceivedTxs_dropsLateBodies , testProperty "handleReceivedTxs penalizes omitted txs after full prune" prop_handleReceivedTxs_penalizesOmittedAfterPrune , testProperty "handleSubmittedTxs retains accepted and drops rejected" prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected , testProperty "nextPeerAction prioritises submitting buffered owned txs" prop_nextPeerAction_prioritisesSubmit , testProperty "nextPeerAction claims claimable tx for best idle advertiser" prop_nextPeerAction_claimsClaimableTx + , testCaseSteps "nextPeerAction claims a tx once the score delay threshold has elapsed" unit_nextPeerAction_claimsAtScoreDelayThreshold , testProperty "nextPeerAction steals expired lease for best idle advertiser" prop_nextPeerAction_claimsExpiredLease , testProperty "nextPeerAction requests an oversized first tx within the soft budget" prop_nextPeerAction_requestsOversizedFirstTx , testCaseSteps "nextPeerAction skips blocked available txs and requests later claimable ones" unit_nextPeerAction_skipsBlockedAvailableTxs @@ -79,6 +81,8 @@ tests = , testProperty "PeerDoNothing waits for the earliest shared expiry" prop_nextPeerAction_earliestWakeDelay , testProperty "PeerDoNothing uses the current peer generation" prop_nextPeerAction_returnsPeerGeneration , testProperty "handleSubmittedTxs bumps idle advertiser generations" prop_handleSubmittedTxs_bumpsIdleAdvertisers + , testCaseSteps "advertisingPeersForTxExcept scans the authoritative peer key sets" unit_advertisingPeersForTxExcept_scansAuthoritativePeerSets + , testCaseSteps "removeAdvertisingPeersForResolvedTx clears all advertising peers for a resolved key" unit_removeAdvertisingPeersForResolvedTx_clearsAllAdvertisingPeers , testCaseSteps "updatePeerPhase only wakes the peer becoming idle" unit_updatePeerPhase_wakesOnlyBecomingIdlePeer , testCaseSteps "updatePeerPhase wakes competing idle advertisers when a peer leaves 
idle" unit_updatePeerPhase_wakesCompetingAdvertisers ] @@ -189,21 +193,30 @@ sharedTxStateInvariant strength SharedTxState { all (\(k, txid) -> Map.lookup txid sharedTxIdToKey == Just (TxKey k)) (IntMap.toList sharedKeyToTxId) - activeEntryLive TxEntry { txLease, txAdvertisers, txAttempts } = + advertisersForKey k = + Map.keysSet $ + Map.filter + (\SharedPeerState { sharedPeerAdvertisedTxKeys } -> + IntSet.member k sharedPeerAdvertisedTxKeys) + sharedPeers + + activeEntryLive TxEntry { txLease, txAdvertiserCount, txAttempts } = leaseLive txLease - || not (Set.null txAdvertisers) + || txAdvertiserCount > 0 || not (Map.null txAttempts) - leaseLive TxClaimable = False + leaseLive TxClaimable {} = False leaseLive TxLeased {} = True - checkTxEntry (k, txEntry@TxEntry { txLease, txAdvertisers, txAttempts }) = + checkTxEntry (k, txEntry@TxEntry { txLease, txAdvertiserCount, txAttempts }) = counterexample ("bad active tx entry " ++ show k ++ ": " ++ show txEntry) $ + let txAdvertisers = advertisersForKey k in conjoin [ property (txAdvertisers `Set.isSubsetOf` knownPeers) + , txAdvertiserCount === Set.size txAdvertisers , property (Map.keysSet txAttempts `Set.isSubsetOf` txAdvertisers) , case txLease of - TxClaimable -> + TxClaimable _ -> property True TxLeased owner _ -> property (Map.member owner sharedPeers && Set.member owner txAdvertisers) @@ -301,25 +314,31 @@ prop_handleReceivedTxIds_newEntries (Positive peeraddr) (ArbSharedTxState shared , StrictSeq.length (peerUnacknowledgedTxIds peerState') === length txidsAndSizes , toList (peerUnacknowledgedTxIds peerState') === fmap (\(txid, _) -> lookupKeyOrFail txid sharedState') txidsAndSizes , IntMap.size (peerAvailableTxIds peerState') === length txidsAndSizes - , sharedPeers sharedState' === sharedPeers sharedState0 - , IntMap.restrictKeys (sharedTxTable sharedState') oldKeys === sharedTxTable sharedState0 - , retainedRestrictKeys (sharedRetainedTxs sharedState') oldKeys === sharedRetainedTxs sharedState0 - , 
IntMap.restrictKeys (sharedKeyToTxId sharedState') oldKeys === sharedKeyToTxId sharedState0 - , sharedGeneration sharedState' === sharedGeneration sharedState0 + 1 - , sharedNextTxKey sharedState' === sharedNextTxKey sharedState0 + length txidsAndSizes - , conjoin (fmap checkExistingTxId (Map.toList (sharedTxIdToKey sharedState0))) + , Map.delete peeraddr (sharedPeers sharedState') === Map.delete peeraddr (sharedPeers sharedStateBase) + , sharedPeerAdvertisedTxKeys (lookupPeerOrFail peeraddr sharedState') === expectedAdvertisedKeys + , IntMap.restrictKeys (sharedTxTable sharedState') oldKeys === sharedTxTable sharedStateBase + , retainedRestrictKeys (sharedRetainedTxs sharedState') oldKeys === sharedRetainedTxs sharedStateBase + , IntMap.restrictKeys (sharedKeyToTxId sharedState') oldKeys === sharedKeyToTxId sharedStateBase + , sharedGeneration sharedState' === sharedGeneration sharedStateBase + 1 + , sharedNextTxKey sharedState' === sharedNextTxKey sharedStateBase + length txidsAndSizes + , conjoin (fmap checkExistingTxId (Map.toList (sharedTxIdToKey sharedStateBase))) , conjoin (fmap checkEntry txidsAndSizes) ] where + sharedStateBase = ensurePeerAdvertisesTxKeys peeraddr [] sharedState0 txidsAndSizes = - freshBatchAgainstSharedState sharedState0 $ + freshBatchAgainstSharedState sharedStateBase $ dedupeBatch [ (abs txid + 1, mkSize txSize) | (txid, txSize) <- txids0 ] - oldKeys = IntMap.keysSet (sharedKeyToTxId sharedState0) + oldKeys = IntMap.keysSet (sharedKeyToTxId sharedStateBase) requestedToReply = fromIntegral (length txidsAndSizes) peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = requestedToReply + fromIntegral extraRequested } (peerState', sharedState') = - handleReceivedTxIds (const False) now defaultTxDecisionPolicy peeraddr requestedToReply txidsAndSizes peerState0 sharedState0 + handleReceivedTxIds (const False) now defaultTxDecisionPolicy peeraddr requestedToReply txidsAndSizes peerState0 sharedStateBase expectedLeaseUntil = addTime 
(interTxSpace defaultTxDecisionPolicy) now + expectedAdvertisedKeys = + sharedPeerAdvertisedTxKeys (lookupPeerOrFail peeraddr sharedStateBase) + `IntSet.union` + IntSet.fromList [ unTxKey (lookupKeyOrFail txid sharedState') | (txid, _) <- txidsAndSizes ] checkExistingTxId (txid, txKey) = Map.lookup txid (sharedTxIdToKey sharedState') === Just txKey @@ -327,10 +346,10 @@ prop_handleReceivedTxIds_newEntries (Positive peeraddr) (ArbSharedTxState shared checkEntry (txid, _) = case IntMap.lookup (unTxKey (lookupKeyOrFail txid sharedState')) (sharedTxTable sharedState') of Nothing -> counterexample ("missing tx entry for " ++ show txid) False - Just TxEntry { txLease, txAdvertisers, txAttempts } -> + Just TxEntry { txLease, txAdvertiserCount, txAttempts } -> conjoin [ txLease === TxLeased peeraddr expectedLeaseUntil - , txAdvertisers === Set.singleton peeraddr + , txAdvertiserCount === 1 , txAttempts === Map.empty ] @@ -356,8 +375,9 @@ prop_handleReceivedTxIds_knownToMempool (Positive peeraddr) txid0 txSize0 = txSize = mkSize txSize0 requestedToReply = 1 peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = requestedToReply } + sharedState0 = ensurePeerAdvertisesTxKeys peeraddr [] emptySharedTxState (peerState', sharedState') = - handleReceivedTxIds (== txid) now defaultTxDecisionPolicy peeraddr requestedToReply [(txid, txSize)] peerState0 emptySharedTxState + handleReceivedTxIds (== txid) now defaultTxDecisionPolicy peeraddr requestedToReply [(txid, txSize)] peerState0 sharedState0 key = lookupKeyOrFail txid sharedState' expectedRetainUntil = addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now @@ -382,7 +402,8 @@ prop_handleReceivedTxIds_retainedIsLocalOnly (Positive peeraddr) txid0 txSize0 = k = unTxKey key retainUntil = addTime 17 now peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = 1 } - sharedState0 = emptySharedTxState + sharedState0 = ensurePeerAdvertisesTxKeys peeraddr [] $ + emptySharedTxState { sharedRetainedTxs = retainedSingleton k 
retainUntil , sharedTxIdToKey = Map.singleton txid key , sharedKeyToTxId = IntMap.singleton k txid @@ -392,6 +413,49 @@ prop_handleReceivedTxIds_retainedIsLocalOnly (Positive peeraddr) txid0 txSize0 = (peerState', sharedState') = handleReceivedTxIds (const False) now defaultTxDecisionPolicy peeraddr 1 [(txid, txSize)] peerState0 sharedState0 +unit_handleReceivedTxIds_addsAdvertiserForActiveTxs :: (String -> IO ()) -> Assertion +unit_handleReceivedTxIds_addsAdvertiserForActiveTxs step = do + step "Run handleReceivedTxIds for a peer advertising txids that are already active via other peers" + let (peerState', sharedState') = + handleReceivedTxIds + (const False) + now + defaultTxDecisionPolicy + peeraddr + requestedTxIds + txidsAndSizes + peerState0 + sharedState0 + step "Assert the local peer now tracks the txids as unacknowledged and available" + toList (peerUnacknowledgedTxIds peerState') @?= txKeys + peerAvailableTxIds peerState' @?= expectedAvailableTxs + step "Assert the shared peer view and advertiser counts were updated once per tx" + sharedPeerAdvertisedTxKeys (lookupPeerOrFail peeraddr sharedState') @?= expectedAdvertisedKeys + map (txAdvertiserCount . (`lookupEntryOrFail` sharedState')) txKeys + @?= map ((+ 1) . txAdvertiserCount . 
(`lookupEntryOrFail` sharedState0)) txKeys + step "Assert unrelated peers and shared mappings are unchanged apart from the generation bump" + Map.delete peeraddr (sharedPeers sharedState') @?= Map.delete peeraddr (sharedPeers sharedState0) + sharedTxIdToKey sharedState' @?= sharedTxIdToKey sharedState0 + sharedKeyToTxId sharedState' @?= sharedKeyToTxId sharedState0 + sharedGeneration sharedState' @?= sharedGeneration sharedState0 + 1 + where + ReceiveDuplicateFixture + { rdfPeerAddr = peeraddr + , rdfRequestedTxIds = requestedTxIds + , rdfTxidsAndSizes = txidsAndSizes + , rdfPeerState = peerState0 + , rdfSharedState = sharedState0 + } = mkReceiveDuplicateFixture 4 3 + + txKeys = fmap (`lookupKeyOrFail` sharedState0) (fmap fst txidsAndSizes) + expectedAvailableTxs = + IntMap.fromList + [ (unTxKey txKey, txSize) + | (txid, txSize) <- txidsAndSizes + , let txKey = lookupKeyOrFail txid sharedState0 + ] + expectedAdvertisedKeys = IntSet.fromList (map unTxKey txKeys) + -- Verifies that handleReceivedTxs buffers received bodies and removes omitted -- requested txs from peer and shared state. 
prop_handleReceivedTxs_buffersAndDropsOmitted @@ -427,12 +491,13 @@ prop_handleReceivedTxs_buffersAndDropsOmitted (Positive peeraddr) txidA0 txidB0 let st = mkSharedState [txidA, txidB] keyA' = lookupKeyOrFail txidA st keyB' = lookupKeyOrFail txidB st in - st { - sharedTxTable = IntMap.fromList - [ (unTxKey keyA', mkTxEntry peeraddr txSizeA (Just TxDownloading)) - , (unTxKey keyB', mkTxEntry peeraddr txSizeB (Just TxDownloading)) - ] - } + ensurePeerAdvertisesTxKeys peeraddr [keyA', keyB'] $ + st { + sharedTxTable = IntMap.fromList + [ (unTxKey keyA', mkTxEntry peeraddr txSizeA (Just TxDownloading)) + , (unTxKey keyB', mkTxEntry peeraddr txSizeB (Just TxDownloading)) + ] + } keyA = lookupKeyOrFail txidA sharedState0 keyB = lookupKeyOrFail txidB sharedState0 kA = unTxKey keyA @@ -482,9 +547,10 @@ prop_handleReceivedTxs_dropsLateBodies (Positive peeraddr) txid0 txSize0 = sharedStateBase = let st = mkSharedState [txid] key' = lookupKeyOrFail txid st in - st { - sharedTxTable = IntMap.singleton (unTxKey key') (mkTxEntry peeraddr txSize Nothing) - } + ensurePeerAdvertisesTxKeys peeraddr [key'] $ + st { + sharedTxTable = IntMap.singleton (unTxKey key') (mkTxEntry peeraddr txSize Nothing) + } key = lookupKeyOrFail txid sharedStateBase k = unTxKey key retainedUntil = addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now @@ -533,9 +599,10 @@ prop_handleReceivedTxs_penalizesOmittedAfterPrune (Positive peeraddr) txid0 txSi sharedStateBase = let st = mkSharedState [txid] key' = lookupKeyOrFail txid st in - st { - sharedTxTable = IntMap.singleton (unTxKey key') (mkTxEntry peeraddr txSize (Just TxDownloading)) - } + ensurePeerAdvertisesTxKeys peeraddr [key'] $ + st { + sharedTxTable = IntMap.singleton (unTxKey key') (mkTxEntry peeraddr txSize (Just TxDownloading)) + } key = lookupKeyOrFail txid sharedStateBase k = unTxKey key peerState0 = emptyPeerTxLocalState @@ -587,12 +654,13 @@ prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected (Positive peeraddr) txid let st = 
mkSharedState [txidA, txidB] keyA' = lookupKeyOrFail txidA st keyB' = lookupKeyOrFail txidB st in - st { - sharedTxTable = IntMap.fromList - [ (unTxKey keyA', mkTxEntry peeraddr txSizeA (Just TxBuffered)) - , (unTxKey keyB', mkTxEntry peeraddr txSizeB (Just TxBuffered)) - ] - } + ensurePeerAdvertisesTxKeys peeraddr [keyA', keyB'] $ + st { + sharedTxTable = IntMap.fromList + [ (unTxKey keyA', mkTxEntry peeraddr txSizeA (Just TxBuffered)) + , (unTxKey keyB', mkTxEntry peeraddr txSizeB (Just TxBuffered)) + ] + } keyA = lookupKeyOrFail txidA sharedState0 keyB = lookupKeyOrFail txidB sharedState0 kA = unTxKey keyA @@ -624,11 +692,12 @@ prop_nextPeerAction_prioritisesSubmit (Positive peeraddr) txid0 txSize0 = key = TxKey 0 k = unTxKey key sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + { sharedPeers = + Map.singleton peeraddr + (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxLeased peeraddr (addTime 10 now) - , txAdvertisers = Set.singleton peeraddr - , txTieBreakSalt = txid + , txAdvertiserCount = 1 , txAttempts = Map.singleton peeraddr TxBuffered } , sharedTxIdToKey = Map.singleton txid key @@ -673,27 +742,58 @@ prop_nextPeerAction_claimsClaimableTx (Positive peerA0) (Positive peerB0) (Posit k = unTxKey key sharedState0 = emptySharedTxState { sharedPeers = Map.fromList - [ (peerA, mkSharedPeerState PeerIdle (PeerScore 1 now)) - , (peerB, mkSharedPeerState PeerIdle (PeerScore 10 now)) - , (peerC, mkSharedPeerState PeerWaitingTxs (PeerScore 0 now)) + [ (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (PeerScore 1 now))) + , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (PeerScore 10 now))) + , (peerC, withAdvertisedTxKeys [key] (mkSharedPeerState PeerWaitingTxs (PeerScore 0 now))) ] , sharedTxIdToKey = Map.singleton txid key , sharedKeyToTxId = IntMap.singleton k txid , 
sharedNextTxKey = 1 , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxClaimable - , txAdvertisers = mkAdvertisers - [ (peerA, AckWhenResolved) - , (peerB, AckWhenResolved) - , (peerC, AckWhenResolved) - ] - , txTieBreakSalt = txid + { txLease = TxClaimable (Time 0) + , txAdvertiserCount = 3 , txAttempts = Map.empty } } peerState0 = emptyPeerTxLocalState { peerAvailableTxIds = IntMap.singleton k txSize } (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peerA peerState0 sharedState0 +unit_nextPeerAction_claimsAtScoreDelayThreshold :: (String -> IO ()) -> Assertion +unit_nextPeerAction_claimsAtScoreDelayThreshold step = do + step "Run nextPeerAction for a peer whose score contributes exactly a 1 ms claim delay" + case peerAction of + PeerRequestTxs txKeys -> do + step "Assert the tx becomes claimable once the peerScore / 20 ms threshold has elapsed" + txKeys @?= [key] + peerRequestedTxs peerState' @?= IntSet.singleton k + txLease (lookupEntryOrFail key sharedState') @?= + TxLeased peeraddr (addTime (interTxSpace defaultTxDecisionPolicy) now) + _ -> + assertFailure ("unexpected peer action: " ++ show peerAction) + where + peeraddr = 7 + txid = 1 + txSize = mkSize (Positive 10) + key = TxKey 0 + k = unTxKey key + claimableAt = Time 99.999 + sharedState0 = emptySharedTxState + { sharedPeers = + Map.singleton peeraddr + (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (PeerScore 20 now))) + , sharedTxIdToKey = Map.singleton txid key + , sharedKeyToTxId = IntMap.singleton k txid + , sharedNextTxKey = 1 + , sharedTxTable = IntMap.singleton k TxEntry + { txLease = TxClaimable claimableAt + , txAdvertiserCount = 1 + , txAttempts = Map.empty + } + } + peerState0 = emptyPeerTxLocalState { peerAvailableTxIds = IntMap.singleton k txSize } + (peerAction, peerState', sharedState') = + nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + -- Verifies that nextPeerAction can steal an expired lease for the 
best idle -- advertiser and request that tx. prop_nextPeerAction_claimsExpiredLease @@ -725,21 +825,16 @@ prop_nextPeerAction_claimsExpiredLease (Positive oldOwner0) (Positive peerA0) (P k = unTxKey key sharedState0 = emptySharedTxState { sharedPeers = Map.fromList - [ (oldOwner, mkSharedPeerState PeerWaitingTxs (PeerScore 0 now)) - , (peerA, mkSharedPeerState PeerIdle (PeerScore 1 now)) - , (peerB, mkSharedPeerState PeerIdle (PeerScore 10 now)) + [ (oldOwner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerWaitingTxs (PeerScore 0 now))) + , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (PeerScore 1 now))) + , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (PeerScore 10 now))) ] , sharedTxIdToKey = Map.singleton txid key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxLeased oldOwner (Time 0) - , txAdvertisers = mkAdvertisers - [ (oldOwner, AckWhenResolved) - , (peerA, AckWhenResolved) - , (peerB, AckWhenResolved) - ] - , txTieBreakSalt = txid + , txAdvertiserCount = 3 , txAttempts = Map.empty } } @@ -774,7 +869,8 @@ prop_nextPeerAction_requestsOversizedFirstTx (Positive peeraddr) txid0 (Positive , maxOutstandingTxBatchesPerPeer = 1 } sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + { sharedPeers = Map.singleton peeraddr + (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) , sharedTxIdToKey = Map.singleton txid key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 @@ -805,6 +901,8 @@ unit_nextPeerAction_skipsBlockedAvailableTxs step = do policy = defaultTxDecisionPolicy peeraddr = 7 :: PeerAddr otherPeer = 8 :: PeerAddr + blockedKey = TxKey 1 + claimableKey = TxKey 2 kBlocked = 1 kClaimable = 2 peerState = emptyPeerTxLocalState @@ -813,20 +911,20 @@ unit_nextPeerAction_skipsBlockedAvailableTxs step = do sharedState :: SharedTxState 
PeerAddr TxId sharedState = emptySharedTxState { sharedPeers = Map.fromList - [ (peeraddr, SharedPeerState PeerIdle (emptyPeerScore testNow) 0) - , (otherPeer, SharedPeerState PeerIdle (emptyPeerScore testNow) 0) + [ (peeraddr, withAdvertisedTxKeys [blockedKey, claimableKey] + (mkSharedPeerState PeerIdle (emptyPeerScore testNow))) + , (otherPeer, withAdvertisedTxKeys [blockedKey] + (mkSharedPeerState PeerIdle (emptyPeerScore testNow))) ] , sharedTxTable = IntMap.fromList [ (kBlocked, TxEntry { txLease = TxLeased otherPeer (addTime 10 testNow) - , txAdvertisers = mkAdvertisers [(peeraddr, AckWhenResolved), (otherPeer, AckWhenResolved)] - , txTieBreakSalt = 0 + , txAdvertiserCount = 2 , txAttempts = Map.empty }) , (kClaimable, TxEntry - { txLease = TxClaimable - , txAdvertisers = Set.singleton peeraddr - , txTieBreakSalt = 0 + { txLease = TxClaimable (Time 0) + , txAdvertiserCount = 1 , txAttempts = Map.empty }) ] @@ -857,11 +955,12 @@ prop_nextPeerAction_ownerSubmitsBuffered (Positive peeraddr) txid0 txSize0 = key = TxKey 0 k = unTxKey key sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + { sharedPeers = + Map.singleton peeraddr + (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxClaimable - , txAdvertisers = Set.singleton peeraddr - , txTieBreakSalt = txid + { txLease = TxClaimable (Time 0) + , txAdvertiserCount = 1 , txAttempts = Map.singleton peeraddr TxBuffered } , sharedTxIdToKey = Map.singleton txid key @@ -906,11 +1005,7 @@ unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx step = do blockedTx = mkTx blockedTxid blockedSize blockedEntry = TxEntry { txLease = TxLeased peeraddr (addTime 10 now) - , txAdvertisers = mkAdvertisers - [ (peeraddr, AckWhenBuffered) - , (submittingPeer, AckWhenResolved) - ] - , txTieBreakSalt = blockedTxid + , txAdvertiserCount = 2 , txAttempts = Map.fromList 
[ (peeraddr, TxBuffered) , (submittingPeer, TxSubmitting) @@ -918,15 +1013,16 @@ unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx step = do } sharedState0 = emptySharedTxState { sharedPeers = Map.fromList - [ (peeraddr, mkSharedPeerState PeerIdle (emptyPeerScore now)) - , (submittingPeer, mkSharedPeerState PeerSubmittingToMempool (emptyPeerScore now)) + [ (peeraddr, withAdvertisedTxKeys [blockedKey, claimableKey] + (mkSharedPeerState PeerIdle (emptyPeerScore now))) + , (submittingPeer, withAdvertisedTxKeys [blockedKey] + (mkSharedPeerState PeerSubmittingToMempool (emptyPeerScore now))) ] , sharedTxTable = IntMap.fromList [ (kBlocked, blockedEntry) , (kClaimable, TxEntry - { txLease = TxClaimable - , txAdvertisers = Set.singleton peeraddr - , txTieBreakSalt = claimableTxid + { txLease = TxClaimable (Time 0) + , txAdvertiserCount = 1 , txAttempts = Map.empty }) ] @@ -963,7 +1059,8 @@ unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx step = do assertBool ("expected positive txIdsToReq, got " ++ show txIdsToReq) (txIdsToReq > 0) peerUnacknowledgedTxIds peerState' @?= StrictSeq.singleton blockedKey peerRequestedTxIds peerState' @?= txIdsToReq - txAdvertisers (lookupEntryOrFail blockedKey sharedState') @?= txAdvertisers blockedEntry + txAdvertiserCount (lookupEntryOrFail blockedKey sharedState') @?= + txAdvertiserCount blockedEntry other -> assertFailure ("unexpected action: " ++ show other) where @@ -979,11 +1076,7 @@ unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx step = do blockedTx = mkTx blockedTxid blockedSize blockedEntry = TxEntry { txLease = TxLeased peeraddr (addTime 10 now) - , txAdvertisers = mkAdvertisers - [ (peeraddr, AckWhenBuffered) - , (submittingPeer, AckWhenResolved) - ] - , txTieBreakSalt = blockedTxid + , txAdvertiserCount = 2 , txAttempts = Map.fromList [ (peeraddr, TxBuffered) , (submittingPeer, TxSubmitting) @@ -991,8 +1084,10 @@ unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx step = do } sharedState0 = 
emptySharedTxState { sharedPeers = Map.fromList - [ (peeraddr, mkSharedPeerState PeerIdle (emptyPeerScore now)) - , (submittingPeer, mkSharedPeerState PeerSubmittingToMempool (emptyPeerScore now)) + [ (peeraddr, withAdvertisedTxKeys [blockedKey] + (mkSharedPeerState PeerIdle (emptyPeerScore now))) + , (submittingPeer, withAdvertisedTxKeys [blockedKey] + (mkSharedPeerState PeerSubmittingToMempool (emptyPeerScore now))) ] , sharedTxTable = IntMap.singleton kBlocked blockedEntry , sharedRetainedTxs = retainedSingleton kResolved (addTime 17 now) @@ -1047,16 +1142,14 @@ prop_nextPeerAction_nonOwnerWaitsUntilResolved (Positive owner0) (Positive peera key = TxKey 0 k = unTxKey key sharedPeers0 = Map.fromList - [ (owner, mkSharedPeerState PeerIdle (emptyPeerScore now)) - , (peeraddr, mkSharedPeerState PeerIdle (emptyPeerScore now)) + [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) + , (peeraddr, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) ] - txAdvertisers0 = mkAdvertisers [(owner, AckWhenBuffered), (peeraddr, AckWhenResolved)] unresolvedSharedState = emptySharedTxState { sharedPeers = sharedPeers0 , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxClaimable - , txAdvertisers = txAdvertisers0 - , txTieBreakSalt = txid + { txLease = TxClaimable (Time 0) + , txAdvertiserCount = 2 , txAttempts = Map.singleton owner TxBuffered } , sharedTxIdToKey = Map.singleton txid key @@ -1075,7 +1168,8 @@ prop_nextPeerAction_nonOwnerWaitsUntilResolved (Positive owner0) (Positive peera unresolvedExpectations = conjoin [ peerUnacknowledgedTxIds unresolvedPeerState' === peerUnacknowledgedTxIds peerState0 - , txAdvertisers (lookupEntryOrFail key unresolvedSharedState') === txAdvertisers (lookupEntryOrFail key unresolvedSharedState) + , txAdvertiserCount (lookupEntryOrFail key unresolvedSharedState') === + txAdvertiserCount (lookupEntryOrFail key unresolvedSharedState) ] (resolvedAction, resolvedPeerState', _) = 
nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 resolvedSharedState @@ -1191,13 +1285,14 @@ prop_nextPeerActionPipelined_secondBodyBatch (Positive peeraddr) txidA0 txidB0 t , peerRequestedTxsSize = txSizeA } sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + { sharedPeers = + Map.singleton peeraddr + (withAdvertisedTxKeys [keyA, keyB] (mkSharedPeerState PeerIdle (emptyPeerScore now))) , sharedTxTable = IntMap.fromList [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading)) , (kB, TxEntry - { txLease = TxClaimable - , txAdvertisers = Set.singleton peeraddr - , txTieBreakSalt = txidB + { txLease = TxClaimable (Time 0) + , txAdvertiserCount = 1 , txAttempts = Map.empty }) ] @@ -1253,14 +1348,15 @@ prop_nextPeerActionPipelined_noThirdBodyBatch (Positive peeraddr) txidA0 txidB0 , peerRequestedTxsSize = txSizeA + txSizeB } sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + { sharedPeers = + Map.singleton peeraddr + (withAdvertisedTxKeys [keyA, keyB, keyC] (mkSharedPeerState PeerIdle (emptyPeerScore now))) , sharedTxTable = IntMap.fromList [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading)) , (kB, mkTxEntry peeraddr txSizeB (Just TxDownloading)) , (kC, TxEntry - { txLease = TxClaimable - , txAdvertisers = Set.singleton peeraddr - , txTieBreakSalt = txidC + { txLease = TxClaimable (Time 0) + , txAdvertiserCount = 1 , txAttempts = Map.empty }) ] @@ -1372,15 +1468,14 @@ prop_nextPeerAction_earliestWakeDelay (Positive peeraddr) (Positive owner0) txid keyB = TxKey 1 idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy } sharedPeers0 = Map.fromList - [ (peeraddr, mkSharedPeerState PeerIdle (emptyPeerScore now)) - , (owner, mkSharedPeerState PeerWaitingTxs (emptyPeerScore now)) + [ (peeraddr, withAdvertisedTxKeys [keyA] (mkSharedPeerState PeerIdle 
(emptyPeerScore now))) + , (owner, withAdvertisedTxKeys [keyA] (mkSharedPeerState PeerWaitingTxs (emptyPeerScore now))) ] leaseFirstState = emptySharedTxState { sharedPeers = sharedPeers0 , sharedTxTable = IntMap.singleton (unTxKey keyA) TxEntry { txLease = TxLeased owner leaseUntil - , txAdvertisers = mkAdvertisers [(owner, AckWhenBuffered), (peeraddr, AckWhenResolved)] - , txTieBreakSalt = txidA + , txAdvertiserCount = 2 , txAttempts = Map.empty } , sharedRetainedTxs = retainedSingleton (unTxKey keyB) retainUntilLater @@ -1392,8 +1487,7 @@ prop_nextPeerAction_earliestWakeDelay (Positive peeraddr) (Positive owner0) txid { sharedPeers = sharedPeers0 , sharedTxTable = IntMap.singleton (unTxKey keyA) TxEntry { txLease = TxLeased owner leaseUntilLater - , txAdvertisers = mkAdvertisers [(owner, AckWhenBuffered), (peeraddr, AckWhenResolved)] - , txTieBreakSalt = txidA + , txAdvertiserCount = 2 , txAttempts = Map.empty } , sharedRetainedTxs = retainedSingleton (unTxKey keyB) retainUntilSoon @@ -1460,18 +1554,13 @@ prop_handleSubmittedTxs_bumpsIdleAdvertisers (Positive owner0) (Positive peerA0) k = unTxKey key sharedState0 = emptySharedTxState { sharedPeers = Map.fromList - [ (owner, mkSharedPeerState PeerSubmittingToMempool (emptyPeerScore now)) - , (peerA, mkSharedPeerState PeerIdle (emptyPeerScore now)) - , (peerB, mkSharedPeerState PeerWaitingTxs (emptyPeerScore now)) + [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerSubmittingToMempool (emptyPeerScore now))) + , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) + , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerWaitingTxs (emptyPeerScore now))) ] , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxLeased owner (addTime 10 now) - , txAdvertisers = mkAdvertisers - [ (owner, AckWhenBuffered) - , (peerA, AckWhenResolved) - , (peerB, AckWhenResolved) - ] - , txTieBreakSalt = txid + , txAdvertiserCount = 3 , txAttempts = Map.singleton owner TxBuffered } , 
sharedTxIdToKey = Map.singleton txid key @@ -1481,6 +1570,76 @@ prop_handleSubmittedTxs_bumpsIdleAdvertisers (Positive owner0) (Positive peerA0) peerState0 = emptyPeerTxLocalState { peerDownloadedTxs = IntMap.singleton k tx } (_, sharedState') = handleSubmittedTxs now defaultTxDecisionPolicy owner [key] [] peerState0 sharedState0 +unit_advertisingPeersForTxExcept_scansAuthoritativePeerSets :: (String -> IO ()) -> Assertion +unit_advertisingPeersForTxExcept_scansAuthoritativePeerSets step = do + step "Build a shared state whose per-tx advertiser count is stale-low" + let advertisingPeers = + advertisingPeersForTxExcept owner key sharedState0 + step "Assert all peers advertising the key are found from the authoritative per-peer key sets" + advertisingPeers @?= Set.fromList [peerA, peerB] + where + owner, peerA, peerB, unrelatedPeer :: PeerAddr + owner = 1 + peerA = 2 + peerB = 3 + unrelatedPeer = 4 + txid :: TxId + txid = 1 + baseState = mkSharedState [txid] + key = lookupKeyOrFail txid baseState + k = unTxKey key + sharedState0 = baseState + { sharedPeers = Map.fromList + [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) + , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) + , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) + , (unrelatedPeer, mkSharedPeerState PeerIdle (emptyPeerScore now)) + ] + , sharedTxTable = IntMap.singleton k TxEntry + { txLease = TxLeased owner (addTime 10 now) + , txAdvertiserCount = 1 + , txAttempts = Map.empty + } + } + +unit_removeAdvertisingPeersForResolvedTx_clearsAllAdvertisingPeers :: (String -> IO ()) -> Assertion +unit_removeAdvertisingPeersForResolvedTx_clearsAllAdvertisingPeers step = do + step "Remove a resolved tx key from all advertising peers" + let (sharedState', advertisers) = + removeAdvertisingPeersForResolvedTx key sharedState0 + step "Assert all advertising peers are returned even when the cached count is 
stale-low" + advertisers @?= Set.fromList [owner, peerA, peerB] + step "Assert the resolved key is cleared from every advertising peer and unrelated peers are unchanged" + sharedPeerAdvertisedTxKeys (lookupPeerOrFail owner sharedState') @?= IntSet.empty + sharedPeerAdvertisedTxKeys (lookupPeerOrFail peerA sharedState') @?= IntSet.empty + sharedPeerAdvertisedTxKeys (lookupPeerOrFail peerB sharedState') @?= IntSet.empty + sharedPeerAdvertisedTxKeys (lookupPeerOrFail unrelatedPeer sharedState') @?= IntSet.empty + sharedTxTable sharedState' @?= sharedTxTable sharedState0 + where + owner, peerA, peerB, unrelatedPeer :: PeerAddr + owner = 1 + peerA = 2 + peerB = 3 + unrelatedPeer = 4 + txid :: TxId + txid = 1 + baseState = mkSharedState [txid] + key = lookupKeyOrFail txid baseState + k = unTxKey key + sharedState0 = baseState + { sharedPeers = Map.fromList + [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) + , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) + , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) + , (unrelatedPeer, mkSharedPeerState PeerIdle (emptyPeerScore now)) + ] + , sharedTxTable = IntMap.singleton k TxEntry + { txLease = TxLeased owner (addTime 10 now) + , txAdvertiserCount = 1 + , txAttempts = Map.empty + } + } + unit_updatePeerPhase_wakesOnlyBecomingIdlePeer :: (String -> IO ()) -> Assertion unit_updatePeerPhase_wakesOnlyBecomingIdlePeer step = do step "Update a peer from waiting to idle" @@ -1503,9 +1662,9 @@ unit_updatePeerPhase_wakesCompetingAdvertisers :: (String -> IO ()) -> Assertion unit_updatePeerPhase_wakesCompetingAdvertisers step = do step "Update an idle peer to a waiting phase" sharedPeerPhase (lookupPeerOrFail leavingPeer sharedState') @?= PeerWaitingTxs - step "Assert competing idle advertisers are woken but unrelated peers are not" + step "Assert no competing advertisers are woken by leaving idle under score-delay 
claiming" sharedPeerGeneration (lookupPeerOrFail leavingPeer sharedState') @?= 5 - sharedPeerGeneration (lookupPeerOrFail competingPeer sharedState') @?= 12 + sharedPeerGeneration (lookupPeerOrFail competingPeer sharedState') @?= 11 sharedPeerGeneration (lookupPeerOrFail unrelatedPeer sharedState') @?= 17 where leavingPeer = 1 @@ -1517,17 +1676,13 @@ unit_updatePeerPhase_wakesCompetingAdvertisers step = do k = unTxKey key sharedState0 = baseState { sharedPeers = Map.fromList - [ (leavingPeer, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 5 }) - , (competingPeer, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 11 }) + [ (leavingPeer, (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) { sharedPeerGeneration = 5 }) + , (competingPeer, (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) { sharedPeerGeneration = 11 }) , (unrelatedPeer, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 17 }) ] , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxClaimable - , txAdvertisers = mkAdvertisers - [ (leavingPeer, AckWhenResolved) - , (competingPeer, AckWhenResolved) - ] - , txTieBreakSalt = txid + { txLease = TxClaimable (Time 0) + , txAdvertiserCount = 2 , txAttempts = Map.empty } } @@ -1543,6 +1698,7 @@ genSharedPeerState = do pure SharedPeerState { sharedPeerPhase, sharedPeerScore = PeerScore peerScoreValue peerScoreTs, + sharedPeerAdvertisedTxKeys = IntSet.empty, sharedPeerGeneration } @@ -1659,17 +1815,17 @@ genSharedTxState = sized $ \n -> do pure (txid, retainUntil) -- Generate one active tx entry using a mix of leased and claimable shapes. 
-genActiveTxEntry :: [PeerAddr] -> TxId -> Gen (TxId, TxEntry PeerAddr) +genActiveTxEntry :: [PeerAddr] -> TxId -> Gen (TxId, Set.Set PeerAddr, TxEntry PeerAddr) genActiveTxEntry peeraddrs txid = do - txEntry <- frequency + (txAdvertisers, txEntry) <- frequency [ (5, genLeasedTxEntry peeraddrs txid) , (3, genClaimableTxEntry peeraddrs txid) ] - pure (txid, txEntry) + pure (txid, txAdvertisers, txEntry) -- Generate a leased entry where the owner may already be downloading, buffered, or submitting. -genLeasedTxEntry :: [PeerAddr] -> TxId -> Gen (TxEntry PeerAddr) -genLeasedTxEntry peeraddrs txid = do +genLeasedTxEntry :: [PeerAddr] -> TxId -> Gen (Set.Set PeerAddr, TxEntry PeerAddr) +genLeasedTxEntry peeraddrs _txid = do advertiserPeers <- genNonEmptySublist peeraddrs owner <- elements advertiserPeers txAdvertisers <- genOwnedAdvertisers advertiserPeers owner @@ -1679,24 +1835,29 @@ genLeasedTxEntry peeraddrs txid = do , (2, Just <$> elements [TxDownloading, TxBuffered]) , (1, pure (Just TxSubmitting)) ] - pure TxEntry { - txLease, - txAdvertisers, - txTieBreakSalt = txid, - txAttempts = maybe Map.empty (Map.singleton owner) ownerAttempt - } + pure + ( txAdvertisers + , TxEntry { + txLease, + txAdvertiserCount = Set.size txAdvertisers, + txAttempts = maybe Map.empty (Map.singleton owner) ownerAttempt + } + ) -- Generate a claimable entry advertised by one or more resolved peers. 
-genClaimableTxEntry :: [PeerAddr] -> TxId -> Gen (TxEntry PeerAddr) -genClaimableTxEntry peeraddrs txid = do +genClaimableTxEntry :: [PeerAddr] -> TxId -> Gen (Set.Set PeerAddr, TxEntry PeerAddr) +genClaimableTxEntry peeraddrs _txid = do advertiserPeers <- genNonEmptySublist peeraddrs txAdvertisers <- genResolvedAdvertisers advertiserPeers - pure TxEntry { - txLease = TxClaimable, - txAdvertisers, - txTieBreakSalt = txid, - txAttempts = Map.empty - } + claimableAt <- genSharedExpiryTime + pure + ( txAdvertisers + , TxEntry { + txLease = TxClaimable claimableAt, + txAdvertiserCount = Set.size txAdvertisers, + txAttempts = Map.empty + } + ) -- Generate the advertiser set for an entry owned by the chosen peer. genOwnedAdvertisers @@ -1714,17 +1875,17 @@ genResolvedAdvertisers advertiserPeers = -- Rebuild a shared state from tx-centric fixtures while preserving interned keys. buildSharedTxState :: Map.Map PeerAddr PeerSeed - -> [(TxId, TxEntry PeerAddr)] + -> [(TxId, Set.Set PeerAddr, TxEntry PeerAddr)] -> [(TxId, Time)] -> Word64 -> SharedTxState PeerAddr TxId buildSharedTxState peerSeeds activeEntries retainedEntries sharedGeneration = baseState { - sharedPeers = deriveSharedPeers peerSeeds activeEntries, + sharedPeers = deriveSharedPeers baseState peerSeeds activeEntries, sharedTxTable = IntMap.fromList [ (unTxKey (lookupKeyOrFail txid baseState), txEntry) - | (txid, txEntry) <- activeEntries + | (txid, _, txEntry) <- activeEntries ], sharedRetainedTxs = retainedFromList @@ -1734,22 +1895,31 @@ buildSharedTxState peerSeeds activeEntries retainedEntries sharedGeneration = sharedGeneration } where - baseState = mkSharedState (fmap fst activeEntries <> fmap fst retainedEntries) + baseState = + mkSharedState ([ txid | (txid, _, _) <- activeEntries ] <> fmap fst retainedEntries) -- Derive peer phases from the generated tx entries. 
deriveSharedPeers - :: Map.Map PeerAddr PeerSeed - -> [(TxId, TxEntry PeerAddr)] + :: SharedTxState PeerAddr TxId + -> Map.Map PeerAddr PeerSeed + -> [(TxId, Set.Set PeerAddr, TxEntry PeerAddr)] -> Map.Map PeerAddr SharedPeerState -deriveSharedPeers peerSeeds activeEntries = +deriveSharedPeers baseState peerSeeds activeEntries = Map.mapWithKey buildPeerState completePeerSeeds where completePeerSeeds = - foldl' addMissingPeerSeed peerSeeds (concatMap (entryPeers . snd) activeEntries) + foldl' addMissingPeerSeed peerSeeds (concatMap entryPeers activeEntries) peerUsages = foldl' accumulatePeerUsage Map.empty activeEntries + peerAdvertisedKeys = + Map.fromListWith IntSet.union + [ (peeraddr, IntSet.singleton (unTxKey (lookupKeyOrFail txid baseState))) + | (txid, txAdvertisers, _) <- activeEntries + , peeraddr <- Set.toList txAdvertisers + ] + addMissingPeerSeed acc peeraddr = Map.insertWith (\_ old -> old) peeraddr defaultPeerSeed acc @@ -1765,6 +1935,8 @@ deriveSharedPeers peerSeeds activeEntries = SharedPeerState { sharedPeerPhase, sharedPeerScore = peerSeedScore, + sharedPeerAdvertisedTxKeys = + Map.findWithDefault IntSet.empty peeraddr peerAdvertisedKeys, sharedPeerGeneration = peerSeedGeneration } @@ -1785,9 +1957,9 @@ emptyPeerDerivedUsage = -- Fold one tx entry's attempts into the derived per-peer usage map. accumulatePeerUsage :: Map.Map PeerAddr PeerDerivedUsage - -> (TxId, TxEntry PeerAddr) + -> (TxId, Set.Set PeerAddr, TxEntry PeerAddr) -> Map.Map PeerAddr PeerDerivedUsage -accumulatePeerUsage acc (_, TxEntry { txAttempts }) = +accumulatePeerUsage acc (_, _, TxEntry { txAttempts }) = foldl' step acc (Map.toList txAttempts) where step acc' (peeraddr, attempt) = @@ -1820,14 +1992,14 @@ updatePeerUsage peeraddr submitting hasRequestedTxs acc = } -- Collect every peer mentioned by a tx entry. 
-entryPeers :: TxEntry PeerAddr -> [PeerAddr] -entryPeers TxEntry { txLease, txAdvertisers, txAttempts } = +entryPeers :: (TxId, Set.Set PeerAddr, TxEntry PeerAddr) -> [PeerAddr] +entryPeers (_, txAdvertisers, TxEntry { txLease, txAttempts }) = leaseOwner <> Set.toList txAdvertisers <> Map.keys txAttempts where leaseOwner = case txLease of TxLeased owner _ -> [owner] - TxClaimable -> [] + TxClaimable _ -> [] -- Shrink shared state by dropping active txs, retained txs, or unused peers. shrinkSharedTxState @@ -1848,7 +2020,10 @@ shrinkSharedTxState sharedState = ] where activeEntries = - [ (resolveTxKey sharedState (TxKey k), txEntry) + [ ( resolveTxKey sharedState (TxKey k) + , advertisersForKey k + , txEntry + ) | (k, txEntry) <- IntMap.toList (sharedTxTable sharedState) ] retainedEntries = @@ -1864,7 +2039,7 @@ shrinkSharedTxState sharedState = }) (sharedPeers sharedState) usedPeers = - foldl' (\peers (_, txEntry) -> peers <> entryPeers txEntry) [] activeEntries + foldl' (\peers activeEntry -> peers <> entryPeers activeEntry) [] activeEntries usedPeerSeeds = Map.filterWithKey (\peeraddr _ -> peeraddr `elem` usedPeers) peerSeeds smallerActiveEntries = @@ -1879,6 +2054,12 @@ shrinkSharedTxState sharedState = | retainedEntries' <- shrinkList (const []) retainedEntries , length retainedEntries' < length retainedEntries ] + advertisersForKey k = + Map.keysSet $ + Map.filter + (\SharedPeerState { sharedPeerAdvertisedTxKeys } -> + IntSet.member k sharedPeerAdvertisedTxKeys) + (sharedPeers sharedState) -- Partition requested keys into a small number of contiguous request batches. 
genRequestedTxBatches @@ -1983,17 +2164,42 @@ mkSharedPeerState sharedPeerPhase sharedPeerScore = SharedPeerState { sharedPeerPhase, sharedPeerScore, + sharedPeerAdvertisedTxKeys = IntSet.empty, sharedPeerGeneration = 0 } +withAdvertisedTxKeys :: [TxKey] -> SharedPeerState -> SharedPeerState +withAdvertisedTxKeys txKeys sharedPeerState = + sharedPeerState { + sharedPeerAdvertisedTxKeys = IntSet.fromList (map unTxKey txKeys) + } + +ensurePeerAdvertisesTxKeys + :: PeerAddr + -> [TxKey] + -> SharedTxState PeerAddr TxId + -> SharedTxState PeerAddr TxId +ensurePeerAdvertisesTxKeys peeraddr txKeys st@SharedTxState { sharedPeers } = + st { + sharedPeers = + Map.alter updatePeer peeraddr sharedPeers + } + where + advertisedKeys = IntSet.fromList (map unTxKey txKeys) + + updatePeer Nothing = + Just (withAdvertisedTxKeys txKeys (mkSharedPeerState PeerIdle (emptyPeerScore now))) + updatePeer (Just sharedPeerState) = + Just + (sharedPeerState { + sharedPeerAdvertisedTxKeys = + sharedPeerAdvertisedTxKeys sharedPeerState `IntSet.union` advertisedKeys + }) + -- Intern a list of txids into an otherwise empty shared state. mkSharedState :: [TxId] -> SharedTxState PeerAddr TxId mkSharedState txids = snd (internTxIds txids emptySharedTxState) --- Construct an advertiser set for a group of peers. -mkAdvertisers :: [(PeerAddr, TxOwnerAckState)] -> Set.Set PeerAddr -mkAdvertisers = Set.fromList . fmap fst - -- Construct a requested batch together with its cached key set. 
mkRequestedTxBatch :: [TxKey] -> SizeInBytes -> RequestedTxBatch mkRequestedTxBatch keys requestedTxBatchSize = RequestedTxBatch @@ -2005,8 +2211,7 @@ mkRequestedTxBatch keys requestedTxBatchSize = RequestedTxBatch mkTxEntry :: PeerAddr -> SizeInBytes -> Maybe TxAttemptState -> TxEntry PeerAddr mkTxEntry peeraddr _txSize mAttempt = TxEntry { txLease = TxLeased peeraddr (addTime 10 now) - , txAdvertisers = Set.singleton peeraddr - , txTieBreakSalt = 0 + , txAdvertiserCount = 1 , txAttempts = maybe Map.empty (Map.singleton peeraddr) mAttempt } @@ -2229,24 +2434,31 @@ mkActiveSharedState allPeers ownerPeer resolvedAdvertisers txidsAndSizes = | (txid, _txSize) <- txidsAndSizes , let txKey = lookupKeyOrFail txid sharedState1 ] + , sharedPeers = + Map.fromList + [ (peeraddr, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { + sharedPeerAdvertisedTxKeys = + Map.findWithDefault IntSet.empty peeraddr peerAdvertisedKeys + }) + | peeraddr <- allPeers + ] } where - sharedState0 = - emptySharedTxState { - sharedPeers = - Map.fromList - [ (peeraddr, mkSharedPeerState PeerIdle (emptyPeerScore now)) - | peeraddr <- allPeers - ] - } + sharedState0 = emptySharedTxState sharedState1 = snd (internTxIds (fmap fst txidsAndSizes) sharedState0) advertisers = Set.fromList (ownerPeer : resolvedAdvertisers) + peerAdvertisedKeys = + Map.fromListWith IntSet.union + [ (peeraddr, IntSet.singleton (unTxKey txKey)) + | (txid, _txSize) <- txidsAndSizes + , let txKey = lookupKeyOrFail txid sharedState1 + , peeraddr <- Set.toList advertisers + ] - mkEntry txKey = TxEntry + mkEntry _txKey = TxEntry { txLease = TxLeased ownerPeer (addTime 10 now) - , txAdvertisers = advertisers - , txTieBreakSalt = unTxKey txKey + , txAdvertiserCount = Set.size advertisers , txAttempts = Map.empty } @@ -2255,6 +2467,11 @@ mkActiveSharedState allPeers ownerPeer resolvedAdvertisers txidsAndSizes = retainAllActiveTxs :: SharedTxState PeerAddr TxId -> SharedTxState PeerAddr TxId retainAllActiveTxs st@SharedTxState { 
sharedTxTable, sharedRetainedTxs, sharedGeneration } = st { + sharedPeers = + Map.map + (\sharedPeerState -> + sharedPeerState { sharedPeerAdvertisedTxKeys = IntSet.empty }) + (sharedPeers st), sharedTxTable = IntMap.empty, sharedRetainedTxs = IntMap.foldlWithKey' retainOne sharedRetainedTxs sharedTxTable, sharedGeneration = sharedGeneration + 1 diff --git a/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs b/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs index a50e49b4b0a..86dfbeee090 100644 --- a/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs +++ b/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs @@ -35,7 +35,7 @@ instance (Show txid, Show peeraddr) => LogFormatting (TraceTxLogic peeraddr txid length [ () | TxEntry { txLease = TxLeased _ _ } <- activeEntries ] claimableTxCount = - length [ () | TxEntry { txLease = TxClaimable } <- activeEntries ] + length [ () | TxEntry { txLease = TxClaimable _ } <- activeEntries ] resolvedTxCount = 0 :: Int From f4221e646a9a201d7bb89654b556355367bd3d39 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Sat, 11 Apr 2026 18:18:14 +0200 Subject: [PATCH 17/67] fixup: fix first ack starvation The first peer that advertised a new txid always got a lease on it. This is a problem since the peer may be at capacity and unable to request the TX. Update the behaviour so that anyone peer that advertise a txid can gain the first claim. 
--- .../TxSubmission/Inbound/V2/Registry.hs | 5 +- .../Network/TxSubmission/Inbound/V2/State.hs | 10 +-- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 69 ++++++++++++++++++- 3 files changed, 74 insertions(+), 10 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index f8ae48c5d2c..e1d60648f0b 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -344,8 +344,9 @@ runNextPeerActionPipelinedImp policy sharedStateVar peeraddr now peerState = ato -- | Process a batch of txids received from this peer. -- -- Interns new txids into the shared state, updates the peer's unacknowledged queue, --- handles mempool fast-path for already-known txids, and sets up initial lease --- ownership for first advertisers. Returns updated peer-local state. +-- handles mempool fast-path for already-known txids, and leaves fresh txids +-- claimable so any advertising peer can later claim them. Returns updated +-- peer-local state. applyReceivedTxIdsImp :: ( MonadSTM m , Ord peeraddr , Ord txid ) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index a5bd9e1689a..5f0f9822342 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -1143,10 +1143,10 @@ markSubmittingTxs peeraddr txKeys st = -- | Handle a batch of txids received from one peer. -- -- Newly seen txids are interned, appended to the peer's unacknowledged queue, --- and entered into the shared tx table. The first advertiser gets the initial --- lease and may acknowledge once the body is buffered locally. 
Later --- advertisers are tracked as backups and may only acknowledge once the tx is --- resolved. +-- and entered into the shared tx table as claimable work. Any peer that later +-- advertises the txid may claim it once its score-derived delay has elapsed, +-- which avoids pinning fresh work to the first peer that happened to announce +-- it. handleReceivedTxIds :: forall peeraddr txid tx. (Ord peeraddr, Ord txid) => (txid -> Bool) -> Time @@ -1277,7 +1277,7 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize case IntMap.lookup k (sharedTxTable sharedAcc') of Nothing -> let txEntry = TxEntry { - txLease = TxLeased peeraddr (addTime (interTxSpace policy) now), + txLease = TxClaimable now, txAdvertiserCount = 1, txAttempts = Map.empty } diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index eafa405e8de..b7d7ce1f260 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -58,6 +58,7 @@ tests = , testProperty "handleReceivedTxIds resolves txids already in mempool" prop_handleReceivedTxIds_knownToMempool , testProperty "handleReceivedTxIds keeps retained txids local-only" prop_handleReceivedTxIds_retainedIsLocalOnly , testCaseSteps "handleReceivedTxIds adds the current peer as an advertiser for active txs" unit_handleReceivedTxIds_addsAdvertiserForActiveTxs + , testCaseSteps "nextPeerAction lets another peer claim a fresh tx when the first advertiser is full" unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull , testProperty "handleReceivedTxs buffers received and drops omitted txs" prop_handleReceivedTxs_buffersAndDropsOmitted , testProperty "handleReceivedTxs drops late bodies already retained or in mempool" prop_handleReceivedTxs_dropsLateBodies , testProperty "handleReceivedTxs penalizes omitted txs 
after full prune" prop_handleReceivedTxs_penalizesOmittedAfterPrune @@ -300,7 +301,7 @@ instance Arbitrary ArbSharedTxState where | sharedState == emptySharedTxState = [] | otherwise = ArbSharedTxState <$> shrinkSharedTxState sharedState --- Verifies that handleReceivedTxIds interns new txids, adds leased entries +-- Verifies that handleReceivedTxIds interns new txids, adds claimable entries -- for them, and preserves unrelated shared-state entries. prop_handleReceivedTxIds_newEntries :: Positive Int @@ -334,7 +335,6 @@ prop_handleReceivedTxIds_newEntries (Positive peeraddr) (ArbSharedTxState shared peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = requestedToReply + fromIntegral extraRequested } (peerState', sharedState') = handleReceivedTxIds (const False) now defaultTxDecisionPolicy peeraddr requestedToReply txidsAndSizes peerState0 sharedStateBase - expectedLeaseUntil = addTime (interTxSpace defaultTxDecisionPolicy) now expectedAdvertisedKeys = sharedPeerAdvertisedTxKeys (lookupPeerOrFail peeraddr sharedStateBase) `IntSet.union` @@ -348,7 +348,7 @@ prop_handleReceivedTxIds_newEntries (Positive peeraddr) (ArbSharedTxState shared Nothing -> counterexample ("missing tx entry for " ++ show txid) False Just TxEntry { txLease, txAdvertiserCount, txAttempts } -> conjoin - [ txLease === TxLeased peeraddr expectedLeaseUntil + [ txLease === TxClaimable now , txAdvertiserCount === 1 , txAttempts === Map.empty ] @@ -456,6 +456,69 @@ unit_handleReceivedTxIds_addsAdvertiserForActiveTxs step = do ] expectedAdvertisedKeys = IntSet.fromList (map unTxKey txKeys) +unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull :: (String -> IO ()) -> Assertion +unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull step = do + step "Receive a fresh txid from peer A while A is already at its inflight size limit" + let (peerAState1, sharedState1) = + handleReceivedTxIds + (const False) + now + defaultTxDecisionPolicy + peerA + requestedToReply + [(txid, txSize)] + 
peerAState0 + sharedState0 + txLease (lookupEntryOrFail key sharedState1) @?= TxClaimable now + let (peerAAction, _, _) = + nextPeerAction now defaultTxDecisionPolicy peerA peerAState1 sharedState1 + step "Run nextPeerAction for peer A and confirm the fresh tx remains unclaimed because A is full" + case peerAAction of + PeerDoNothing _ _ -> pure () + other -> + assertFailure ("unexpected peer A action: " ++ show other) + step "Receive the same txid from peer B and run nextPeerAction for B" + let (peerBState1, sharedState2) = + handleReceivedTxIds + (const False) + now + defaultTxDecisionPolicy + peerB + requestedToReply + [(txid, txSize)] + peerBState0 + sharedState1 + (peerBAction, peerBState2, sharedState3) = + nextPeerAction now defaultTxDecisionPolicy peerB peerBState1 sharedState2 + case peerBAction of + PeerRequestTxs txKeys -> do + step "Assert peer B can claim and request the fresh tx immediately" + txKeys @?= [key] + peerRequestedTxs peerBState2 @?= IntSet.singleton k + txLease (lookupEntryOrFail key sharedState3) @?= + TxLeased peerB (addTime (interTxSpace defaultTxDecisionPolicy) now) + other -> + assertFailure ("unexpected peer B action: " ++ show other) + where + peerA = 7 :: PeerAddr + peerB = 8 :: PeerAddr + txid = 1 + txSize = mkSize (Positive 10) + requestedToReply = 1 + key = TxKey 0 + k = unTxKey key + sharedState0 = emptySharedTxState + { sharedPeers = Map.fromList + [ (peerA, mkSharedPeerState PeerIdle (emptyPeerScore now)) + , (peerB, mkSharedPeerState PeerIdle (emptyPeerScore now)) + ] + } + peerAState0 = emptyPeerTxLocalState + { peerRequestedTxIds = requestedToReply + , peerRequestedTxsSize = txsSizeInflightPerPeer defaultTxDecisionPolicy + } + peerBState0 = emptyPeerTxLocalState { peerRequestedTxIds = requestedToReply } + -- Verifies that handleReceivedTxs buffers received bodies and removes omitted -- requested txs from peer and shared state. 
prop_handleReceivedTxs_buffersAndDropsOmitted From 0985ed7fa2ed5ff3dd91dc3e171de5bbfa227bd9 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Sun, 12 Apr 2026 09:03:10 +0200 Subject: [PATCH 18/67] fixup: replace the local state tvar Replace the local state tvar with V1's Stateful types. --- .../Network/TxSubmission/Inbound/V2.hs | 175 +++++++++--------- 1 file changed, 87 insertions(+), 88 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs index 4d3674f30fb..16a6efab2f2 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE BangPatterns #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE KindSignatures #-} @@ -39,6 +40,35 @@ import Ouroboros.Network.TxSubmission.Inbound.V2.Registry as V2 import Ouroboros.Network.TxSubmission.Inbound.V2.Types as V2 import Ouroboros.Network.TxSubmission.Mempool.Reader +-- The same Stateful types as V1 uses. +newtype Stateful s n txid tx m = Stateful (s -> ServerStIdle n txid tx m ()) + +newtype StatefulM s n txid tx m + = StatefulM (s -> m (ServerStIdle n txid tx m ())) + +newtype StatefulCollect s n txid tx m + = StatefulCollect (s -> Collect txid tx -> m (ServerStIdle n txid tx m ())) + +continueWithState :: Stateful s n txid tx m + -> s + -> ServerStIdle n txid tx m () +continueWithState (Stateful f) !st = + f st + +continueWithStateM :: StatefulM s n txid tx m + -> s + -> m (ServerStIdle n txid tx m ()) +continueWithStateM (StatefulM f) !st = + f st +{-# NOINLINE continueWithStateM #-} + +collectAndContinueWithState :: StatefulCollect s n txid tx m + -> s + -> Collect txid tx + -> m (ServerStIdle n txid tx m ()) +collectAndContinueWithState (StatefulCollect f) !st c = + f st c +{-# NOINLINE collectAndContinueWithState #-} -- | A tx-submission inbound side (server, sic!). 
-- @@ -83,17 +113,10 @@ txSubmissionInboundV2 addCounters } = TxSubmissionServerPipelined $ do - -- The pipelined server API does not thread a user state parameter through - -- `ServerStIdle`. Multiple continuations here resume after network IO and - -- must all access and update the latest peer-local state, so a plain - -- `PeerTxLocalState` captured in closures would go stale. - -- - -- No other threads access the peer's peerStateVar. - peerStateVar <- newTVarIO emptyPeerTxLocalState case initDelay of TxSubmissionInitDelay delay -> threadDelay delay NoTxSubmissionInitDelay -> return () - serverIdle peerStateVar + continueWithStateM serverIdle emptyPeerTxLocalState where -- Entry point and reset state for the non-pipelined server loop. @@ -102,34 +125,27 @@ txSubmissionInboundV2 -- 1. The server first starts -- 2. All pipelined requests have completed and the counter returns to zero -- 3. An idle peer wakes up after a @PeerDoNothing@ wait - serverIdle :: StrictTVar m (PeerTxLocalState tx) - -> m (ServerStIdle Z txid tx m ()) - serverIdle peerStateVar = do - peerState <- readTVarIO peerStateVar - + serverIdle :: StatefulM (PeerTxLocalState tx) Z txid tx m + serverIdle = StatefulM $ \peerState -> do now <- getMonotonicTime (peerAction, peerState') <- runNextPeerAction now peerState - atomically $ writeTVar peerStateVar peerState' case peerAction of PeerDoNothing generation mDelay -> do awaitSharedChange generation mDelay - serverIdle peerStateVar + continueWithStateM serverIdle peerState' PeerSubmitTxs txKeys -> - submitBufferedTxs peerStateVar txKeys (serverIdle peerStateVar) + continueWithStateM (submitBufferedTxs txKeys serverIdle) peerState' PeerRequestTxs txKeys -> - requestTxBodies peerStateVar Zero txKeys - PeerRequestTxIds txIdsToAck txIdsToReq -> do - serverReqTxIds peerStateVar Zero txIdsToAck txIdsToReq + continueWithStateM (requestTxBodies Zero txKeys) peerState' + PeerRequestTxIds txIdsToAck txIdsToReq -> + continueWithStateM (serverReqTxIds Zero 
txIdsToAck txIdsToReq) peerState' -- | Submit buffered transaction bodies to the mempool. submitBufferedTxs :: forall (n :: N). - StrictTVar m (PeerTxLocalState tx) - -> [TxKey] - -> m (ServerStIdle n txid tx m ()) - -> m (ServerStIdle n txid tx m ()) - submitBufferedTxs peerStateVar txKeys k = do - - peerState <- readTVarIO peerStateVar + [TxKey] + -> StatefulM (PeerTxLocalState tx) n txid tx m + -> StatefulM (PeerTxLocalState tx) n txid tx m + submitBufferedTxs txKeys k = StatefulM $ \peerState -> do bufferedTxs <- resolveBufferedTxs peerState txKeys -- Flags the txs as on the way to the mempool, which temporarily blocks further @@ -171,82 +187,72 @@ txSubmissionInboundV2 traceWith tracer (TraceTxInboundRejectedFromMempool rejectedForTrace delta) peerState' <- applySubmittedTxs end resolvedTxKeys (fmap fst rejectedTxs) peerState - atomically $ writeTVar peerStateVar peerState' - k + continueWithStateM k peerState' -- Request transaction bodies from the peer. requestTxBodies :: forall (n :: N). - StrictTVar m (PeerTxLocalState tx) - -> Nat n + Nat n -> [TxKey] - -> m (ServerStIdle n txid tx m ()) - requestTxBodies peerStateVar n txKeys = do - peerState <- readTVarIO peerStateVar + -> StatefulM (PeerTxLocalState tx) n txid tx m + requestTxBodies n txKeys = StatefulM $ \peerState -> do txsToRequest <- resolveTxRequest peerState txKeys traceWith tracer (TraceTxInboundRequestTxs (Map.keys txsToRequest)) addCounters mempty { txMessagesSent = 1 , txsRequested = fromIntegral (Map.size txsToRequest) } pure $ SendMsgRequestTxsPipelined txsToRequest - (continueAfterBodyRequests peerStateVar (Succ n)) + (continueWithStateM (continueAfterBodyRequests (Succ n)) peerState) -- Continue processing after receiving replies from the peer in pipelined mode. continueAfterReplies :: forall (n :: N). 
- StrictTVar m (PeerTxLocalState tx) - -> Nat n - -> m (ServerStIdle n txid tx m ()) - continueAfterReplies peerStateVar Zero = serverIdle peerStateVar - continueAfterReplies peerStateVar n@Succ{} = do - peerState <- readTVarIO peerStateVar + Nat n + -> StatefulM (PeerTxLocalState tx) n txid tx m + continueAfterReplies Zero = serverIdle + continueAfterReplies n@Succ{} = StatefulM $ \peerState -> do now <- getMonotonicTime (peerAction, peerState') <- runNextPeerActionPipelined now peerState - atomically $ writeTVar peerStateVar peerState' case peerAction of PeerSubmitTxs txKeys -> - submitBufferedTxs peerStateVar txKeys (continueAfterReplies peerStateVar n) + continueWithStateM (submitBufferedTxs txKeys (continueAfterReplies n)) peerState' PeerRequestTxs txKeys -> - requestTxBodies peerStateVar n txKeys + continueWithStateM (requestTxBodies n txKeys) peerState' PeerRequestTxIds txIdsToAck txIdsToReq -> - serverReqTxIds peerStateVar n txIdsToAck txIdsToReq + continueWithStateM (serverReqTxIds n txIdsToAck txIdsToReq) peerState' PeerDoNothing {} -> - handleReplies peerStateVar n + pure $ continueWithState (handleReplies n) peerState' -- Continue processing after receiving transaction body replies in pipelined mode. continueAfterBodyRequests :: forall (n :: N). 
- StrictTVar m (PeerTxLocalState tx) - -> Nat (S n) - -> m (ServerStIdle (S n) txid tx m ()) - continueAfterBodyRequests peerStateVar n = do - peerState <- readTVarIO peerStateVar + Nat (S n) + -> StatefulM (PeerTxLocalState tx) (S n) txid tx m + continueAfterBodyRequests n = StatefulM $ \peerState -> do now <- getMonotonicTime (peerAction, peerState') <- runNextPeerActionPipelined now peerState - atomically $ writeTVar peerStateVar peerState' case peerAction of PeerSubmitTxs txKeys -> - submitBufferedTxs peerStateVar txKeys (continueAfterReplies peerStateVar n) + continueWithStateM (submitBufferedTxs txKeys (continueAfterReplies n)) peerState' PeerRequestTxs txKeys -> - requestTxBodies peerStateVar n txKeys - PeerRequestTxIds txIdsToAck txIdsToReq -> do - serverReqTxIds peerStateVar n txIdsToAck txIdsToReq + continueWithStateM (requestTxBodies n txKeys) peerState' + PeerRequestTxIds txIdsToAck txIdsToReq -> + continueWithStateM (serverReqTxIds n txIdsToAck txIdsToReq) peerState' PeerDoNothing {} -> - handleReplies peerStateVar n + pure $ continueWithState (handleReplies n) peerState' -- Construct and send a txid request message to the peer. serverReqTxIds :: forall (n :: N). 
- StrictTVar m (PeerTxLocalState tx) - -> Nat n + Nat n -> NumTxIdsToAck -> NumTxIdsToReq - -> m (ServerStIdle n txid tx m ()) + -> StatefulM (PeerTxLocalState tx) n txid tx m -- No requests pending; transitions back to @serverIdle@ - serverReqTxIds peerStateVar Zero 0 0 = serverIdle peerStateVar + serverReqTxIds Zero 0 0 = serverIdle -- Requests complete but pipeline not empty, continues to -- @handleReplies@ to process remaining in-flight replies - serverReqTxIds peerStateVar n@Succ{} 0 0 = handleReplies peerStateVar n + serverReqTxIds n@Succ{} 0 0 = StatefulM $ \peerState -> + pure $ continueWithState (handleReplies n) peerState -- Non-pipelined request, may send a blocking request - serverReqTxIds peerStateVar Zero txIdsToAck txIdsToReq = do - peerState <- readTVarIO peerStateVar + serverReqTxIds Zero txIdsToAck txIdsToReq = StatefulM $ \peerState -> do addCounters mempty { txIdMessagesSent = 1 , txIdsRequested = fromIntegral txIdsToReq } if StrictSeq.null (peerUnacknowledgedTxIds peerState) @@ -262,54 +268,47 @@ txSubmissionInboundV2 throwIO ProtocolErrorTxIdsNotRequested addCounters mempty { txIdRepliesReceived = 1 , txIdsReceived = fromIntegral (length txids') } - peerStateCurrent <- readTVarIO peerStateVar - peerState' <- applyReceivedTxIds now txIdsToReq txids' peerStateCurrent - atomically $ writeTVar peerStateVar peerState' - serverIdle peerStateVar) + peerState' <- applyReceivedTxIds now txIdsToReq txids' peerState + continueWithStateM serverIdle peerState') else pure $ SendMsgRequestTxIdsPipelined txIdsToAck txIdsToReq - (handleReplies peerStateVar (Succ Zero)) + (pure $ continueWithState (handleReplies (Succ Zero)) peerState) -- Pipelined request at depth > 0. Sends a pipelined message and continues -- to @handleReplies@. 
- serverReqTxIds peerStateVar n@Succ{} txIdsToAck txIdsToReq = do + serverReqTxIds n@Succ{} txIdsToAck txIdsToReq = StatefulM $ \peerState -> do addCounters mempty { txIdMessagesSent = 1 , txIdsRequested = fromIntegral txIdsToReq } pure $ SendMsgRequestTxIdsPipelined txIdsToAck txIdsToReq - (handleReplies peerStateVar (Succ n)) + (pure $ continueWithState (handleReplies (Succ n)) peerState) -- Prepare to collect pipelined replies from the peer. handleReplies :: forall (n :: N). - StrictTVar m (PeerTxLocalState tx) - -> Nat (S n) - -> m (ServerStIdle (S n) txid tx m ()) - handleReplies peerStateVar (Succ Zero) = - pure $ CollectPipelined Nothing (handleReply peerStateVar Zero) + Nat (S n) + -> Stateful (PeerTxLocalState tx) (S n) txid tx m + handleReplies (Succ Zero) = Stateful $ \peerState -> + CollectPipelined Nothing (collectAndContinueWithState (handleReply Zero) peerState) - handleReplies peerStateVar (Succ n'@Succ{}) = - pure $ CollectPipelined Nothing (handleReply peerStateVar n') + handleReplies (Succ n'@Succ{}) = Stateful $ \peerState -> + CollectPipelined Nothing (collectAndContinueWithState (handleReply n') peerState) -- Process a single pipelined reply from the peer. handleReply :: forall (n :: N). 
- StrictTVar m (PeerTxLocalState tx) - -> Nat n - -> Collect txid tx - -> m (ServerStIdle n txid tx m ()) - handleReply peerStateVar n = \case + Nat n + -> StatefulCollect (PeerTxLocalState tx) n txid tx m + handleReply n = StatefulCollect $ \peerState -> \case CollectTxIds txIdsToReq txids -> do unless (length txids <= fromIntegral txIdsToReq) $ throwIO ProtocolErrorTxIdsNotRequested addCounters mempty { txIdRepliesReceived = 1 , txIdsReceived = fromIntegral (length txids) } - peerState <- readTVarIO peerStateVar now <- getMonotonicTime peerState' <- applyReceivedTxIds now txIdsToReq txids peerState - atomically $ writeTVar peerStateVar peerState' - continueAfterReplies peerStateVar n + continueWithStateM (continueAfterReplies n) peerState' CollectTxs requested txs -> do let received = Map.fromList [ (txId tx, tx) | tx <- txs ] @@ -321,13 +320,11 @@ txSubmissionInboundV2 let protocolError = ProtocolErrorTxSizeError wrongSizedTxs traceWith tracer (TraceTxInboundError protocolError) throwIO protocolError - peerState <- readTVarIO peerStateVar now <- getMonotonicTime (penaltyCount, peerState') <- applyReceivedTxs now [ (txId tx, tx) | tx <- txs ] peerState - atomically $ writeTVar peerStateVar peerState' unless (penaltyCount == 0) $ void $ countRejectedTxs now penaltyCount - continueAfterReplies peerStateVar n + continueWithStateM (continueAfterReplies n) peerState' -- Partition submitted transactions into accepted and rejected groups classifySubmittedTxs :: [(TxKey, txid)] @@ -372,3 +369,5 @@ txSubmissionInboundV2 received - advertised <= const_MAX_TX_SIZE_DISCREPANCY | otherwise = advertised - received <= const_MAX_TX_SIZE_DISCREPANCY + + From c656128620debc9a239883abdc4cb3bc22a36128 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Sun, 12 Apr 2026 09:42:44 +0200 Subject: [PATCH 19/67] fixup: benchmark for a single server --- .../bench/Bench/TxSubmissionV2Server.hs | 198 ++++++++++++++++++ ouroboros-network/bench/Main.hs | 7 + 
ouroboros-network/ouroboros-network.cabal | 8 +- 3 files changed, 212 insertions(+), 1 deletion(-) create mode 100644 ouroboros-network/bench/Bench/TxSubmissionV2Server.hs diff --git a/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs b/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs new file mode 100644 index 00000000000..d46dcaede58 --- /dev/null +++ b/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs @@ -0,0 +1,198 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} + +module Bench.TxSubmissionV2Server + ( DirectServerFixture + , DirectServerResult + , mkDirectServerFixture + , runDirectServerBenchmark + ) where + +import Control.Concurrent.Class.MonadSTM qualified as Lazy +import Control.Concurrent.Class.MonadSTM.Strict +import Control.DeepSeq (NFData) +import Control.Tracer (nullTracer) + +import Data.List.NonEmpty qualified as NonEmpty +import Data.Map.Strict (Map) +import Data.Map.Strict qualified as Map +import GHC.Generics (Generic) + +import Ouroboros.Network.Protocol.TxSubmission2.Server +import Ouroboros.Network.Protocol.TxSubmission2.Type + (NumTxIdsToReq (getNumTxIdsToReq), SizeInBytes (..)) +import Ouroboros.Network.TxSubmission.Inbound.V2 + (TxSubmissionInitDelay (NoTxSubmissionInitDelay), + defaultTxDecisionPolicy, txSubmissionInboundV2) +import Ouroboros.Network.TxSubmission.Inbound.V2.Registry (newSharedTxStateVar, + newTxSubmissionCountersVar, withPeer) +import Ouroboros.Network.TxSubmission.Inbound.V2.Types (TxSubmissionCounters, + emptySharedTxState) + +import Test.Ouroboros.Network.TxSubmission.Types + (Tx (..), TxId, + emptyMempool, getMempoolReader, getMempoolWriter, readMempool) + + +data DirectServerFixture = DirectServerFixture + { dsPeerAddr :: !Int + , dsTxIdReplyBatches :: !Int + , dsTxSize :: !SizeInBytes + } + deriving stock (Eq, Show, Generic) + 
deriving anyclass NFData + + +data DirectServerResult = DirectServerResult + { dsAcceptedTxs :: !Int + , dsCounters :: !TxSubmissionCounters + } + deriving stock (Eq, Show, Generic) + deriving anyclass NFData + + +data PendingReply + = PendingTxIds !NumTxIdsToReq ![(TxId, SizeInBytes)] + | PendingTxs !(Map TxId SizeInBytes) + + +mkDirectServerFixture + :: Int -> DirectServerFixture +mkDirectServerFixture batches = + DirectServerFixture + { dsPeerAddr = 1 + , dsTxIdReplyBatches = batches + , dsTxSize = SizeInBytes 1024 + } + + +runDirectServerBenchmark + :: DirectServerFixture -> IO DirectServerResult +runDirectServerBenchmark + DirectServerFixture { + dsPeerAddr, + dsTxIdReplyBatches, + dsTxSize + } = do + inboundMempool <- emptyMempool + duplicateTxIdsVar <- Lazy.newTVarIO [] + sharedStateVar <- newSharedTxStateVar emptySharedTxState + countersVar <- newTxSubmissionCountersVar mempty + + withPeer + defaultTxDecisionPolicy + (getMempoolReader inboundMempool) + sharedStateVar + countersVar + dsPeerAddr + $ \api -> do + let server = + txSubmissionInboundV2 + nullTracer + NoTxSubmissionInitDelay + (getMempoolReader inboundMempool) + (getMempoolWriter duplicateTxIdsVar inboundMempool) + getTxSize + api + + case server of + TxSubmissionServerPipelined initServer -> do + st0 <- initServer + driveServer + dsTxSize + dsTxIdReplyBatches + 1 + [] + st0 + + (DirectServerResult + . 
length <$> readMempool inboundMempool) + <*> readTVarIO countersVar + + +driveServer + :: SizeInBytes + -> Int + -> TxId + -> [PendingReply] + -> ServerStIdle n TxId (Tx TxId) IO () + -> IO () +driveServer !txSize !remainingBatches !nextTxId !pending = + \case + SendMsgRequestTxIdsBlocking _ req kDone k + | remainingBatches <= 0 -> kDone + | otherwise -> do + let (txids, nextTxId') = mkTxIdReply txSize nextTxId req + st' <- k (NonEmpty.fromList txids) + driveServer txSize (remainingBatches - 1) nextTxId' pending st' + + SendMsgRequestTxIdsPipelined _ req k -> do + let (txids, nextTxId', remainingBatches') = + if remainingBatches <= 0 + then ([], nextTxId, remainingBatches) + else let (txids', nextTxId'') = mkTxIdReply txSize nextTxId req + in (txids', nextTxId'', remainingBatches - 1) + pending' = pending ++ [PendingTxIds req txids] + st' <- k + driveServer txSize remainingBatches' nextTxId' pending' st' + + SendMsgRequestTxsPipelined requested k -> do + st' <- k + driveServer + txSize + remainingBatches + nextTxId + (pending ++ [PendingTxs requested]) + st' + + CollectPipelined mNone collect -> + case pending of + reply : pending' -> do + st' <- collect (renderPendingReply reply) + driveServer txSize remainingBatches nextTxId pending' st' + [] -> + case mNone of + Just k -> k >>= driveServer txSize remainingBatches nextTxId [] + Nothing -> + error $ + "TxSubmissionV2 direct benchmark: unexpected " + ++ "CollectPipelined with no pending replies" + + +mkTxIdReply + :: SizeInBytes + -> TxId + -> NumTxIdsToReq + -> ([(TxId, SizeInBytes)], TxId) +mkTxIdReply txSize nextTxId req = + ( [ (txid, txSize) + | txid <- [nextTxId .. 
nextTxId + replyCount - 1] + ] + , nextTxId + replyCount + ) + where + replyCount = fromIntegral (getNumTxIdsToReq req) + + +renderPendingReply + :: PendingReply -> Collect TxId (Tx TxId) +renderPendingReply = \case + PendingTxIds req txids -> + CollectTxIds req txids + + PendingTxs requested -> + CollectTxs requested + [ Tx { + getTxId = txid, + getTxSize = txSize, + getTxAdvSize = txSize, + getTxValid = True, + getTxParent = Nothing + } + | (txid, txSize) <- Map.toAscList requested + ] diff --git a/ouroboros-network/bench/Main.hs b/ouroboros-network/bench/Main.hs index 4e71c99a770..b5535d1242d 100644 --- a/ouroboros-network/bench/Main.hs +++ b/ouroboros-network/bench/Main.hs @@ -2,6 +2,8 @@ module Main (main) where +import Bench.TxSubmissionV2Server qualified as DirectV2 + import Control.DeepSeq (NFData, rnf) import Control.Exception (evaluate) import System.Mem (performMajorGC) @@ -36,6 +38,11 @@ main = , env (prepareEnv (TX.mkFanoutFixture 100 3)) $ \fixture -> bench "scenario/fanout-retained/100peers/3txids/x1000" $ nfAppIO (TX.runFanoutLoop benchLoops) fixture + , env + (prepareEnv (DirectV2.mkDirectServerFixture 1_000)) + $ \fixture -> + bench "server/direct-interpreter/single-peer/1000batches" $ + nfAppIO DirectV2.runDirectServerBenchmark fixture ] ] ] diff --git a/ouroboros-network/ouroboros-network.cabal b/ouroboros-network/ouroboros-network.cabal index 023e28d2952..d0de7b4e22d 100644 --- a/ouroboros-network/ouroboros-network.cabal +++ b/ouroboros-network/ouroboros-network.cabal @@ -1050,11 +1050,17 @@ benchmark sim-benchmarks type: exitcode-stdio-1.0 hs-source-dirs: bench main-is: Main.hs + other-modules: + Bench.TxSubmissionV2Server build-depends: base, containers, + contra-tracer, deepseq, - ouroboros-network:{ouroboros-network, ouroboros-network-tests-lib}, + io-classes:{io-classes, strict-stm}, + ouroboros-network:ouroboros-network, + ouroboros-network:ouroboros-network-tests-lib, + ouroboros-network:protocols, pretty-simple, splitmix, tasty-bench 
>=0.3.5, From cae4b28cfc7be2fc06501bca98baecd3d0347410 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Mon, 13 Apr 2026 11:14:25 +0200 Subject: [PATCH 20/67] fixup: improved comment Improve comment around deletion of retaind txs. --- .../lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 5f0f9822342..4a5635b3aed 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -77,7 +77,10 @@ mkPeerActionContext now policy peeraddr peerState sharedState = pacClaimDelay = peerClaimDelay policy now (sharedPeerScore sharedPeerState') } where - -- Remove expireds TX keys from the shared state + -- Remove expired retained TX keys from all shared state tables. + -- When the retain timer expires, the peer gives up waiting for this txid + -- and will acknowledge it. We remove from all tables so the tx can be + -- re-advertised if needed. sharedState' = let expiredRetainedKeys = retainedExpiredKeys now (sharedRetainedTxs sharedState) prunedSharedState = dropTxKeys expiredRetainedKeys sharedState in From 679f432b50c9fa29b7d545af6e20073d448853aa Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Mon, 13 Apr 2026 11:15:15 +0200 Subject: [PATCH 21/67] fixup: inline retained functions The retained functions in Types.hs are just small wrappes around IntPSQ functions. 
--- .../Network/TxSubmission/Inbound/V2/Types.hs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index 0205366e67e..0d15bdb8645 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -402,16 +402,19 @@ retainedToList = retainedSize :: RetainedTxs -> Int retainedSize = IntPSQ.size +{-# INLINE retainedSize #-} retainedLookup :: Int -> RetainedTxs -> Maybe Time retainedLookup k retained = fmap fst (IntPSQ.lookup k retained) +{-# INLINE retainedLookup #-} retainedMember :: Int -> RetainedTxs -> Bool retainedMember k retained = case IntPSQ.lookup k retained of Just _ -> True Nothing -> False +{-# INLINE retainedMember #-} retainedInsertMax :: Int -> Time -> RetainedTxs -> RetainedTxs retainedInsertMax k retainUntil retained = @@ -421,14 +424,17 @@ retainedInsertMax k retainUntil retained = case retainedLookup k retained of Just existing -> max existing retainUntil Nothing -> retainUntil +{-# INLINE retainedInsertMax #-} retainedDeleteKeys :: IntSet -> RetainedTxs -> RetainedTxs retainedDeleteKeys keys retained = IntSet.foldl' (flip IntPSQ.delete) retained keys +{-# INLINE retainedDeleteKeys #-} retainedKeysSet :: RetainedTxs -> IntSet retainedKeysSet = IntPSQ.fold' (\k _ _ acc -> IntSet.insert k acc) IntSet.empty +{-# INLINE retainedKeysSet #-} retainedRestrictKeys :: RetainedTxs -> IntSet -> RetainedTxs retainedRestrictKeys retained keys = @@ -437,6 +443,7 @@ retainedRestrictKeys retained keys = keep k retainUntil _ | IntSet.member k keys = IntPSQ.insert k retainUntil () | otherwise = id +{-# INLINE retainedRestrictKeys #-} retainedNextWake :: Time -> RetainedTxs -> Maybe Time retainedNextWake currentTime = @@ -448,7 +455,8 @@ retainedNextWake currentTime = | retainUntil > 
currentTime -> Just retainUntil | otherwise -> go retained' Nothing -> - Nothing + Nothing +{-# INLINE retainedNextWake #-} retainedExpiredKeys :: Time -> RetainedTxs -> IntSet retainedExpiredKeys currentTime = @@ -463,6 +471,7 @@ retainedExpiredKeys currentTime = expired Nothing -> expired +{-# INLINE retainedExpiredKeys #-} peerGenerationOf :: Ord peeraddr => peeraddr From eb7125bb2275a260fb3cb5cbfb9e59cee42b55c3 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Tue, 14 Apr 2026 07:49:39 +0200 Subject: [PATCH 22/67] fixup: nothunks for tests Use nothunks to assert that there are no thunks after some property based tests. --- .../framework/lib/NoThunks/Class/Orphans.hs | 3 + .../Network/TxSubmission/Inbound/V2/Types.hs | 33 ++++++---- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 66 +++++++++++++++---- 3 files changed, 80 insertions(+), 22 deletions(-) diff --git a/ouroboros-network/framework/lib/NoThunks/Class/Orphans.hs b/ouroboros-network/framework/lib/NoThunks/Class/Orphans.hs index 8bfa119b075..a61c1c6cb8d 100644 --- a/ouroboros-network/framework/lib/NoThunks/Class/Orphans.hs +++ b/ouroboros-network/framework/lib/NoThunks/Class/Orphans.hs @@ -7,6 +7,7 @@ module NoThunks.Class.Orphans where import Data.IntPSQ (IntPSQ) import Data.IntPSQ qualified as IntPSQ +import Data.IntSet (IntSet) import Data.OrdPSQ (OrdPSQ) import Data.OrdPSQ qualified as OrdPSQ @@ -33,3 +34,5 @@ instance (NoThunks k, NoThunks v) => NoThunks (IntPSQ k v) where showTypeOf _ = "IntPSQ" deriving via InspectHeap SockAddr instance NoThunks SockAddr + +deriving via InspectHeap IntSet instance NoThunks IntSet diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index 0d15bdb8645..215d20f6b90 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -1,8 +1,10 @@ +{-# LANGUAGE 
DataKinds #-} {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE DerivingVia #-} {-# LANGUAGE ExistentialQuantification #-} {-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE GeneralisedNewtypeDeriving #-} {-# LANGUAGE NamedFieldPuns #-} @@ -71,6 +73,9 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.Types import Control.DeepSeq (NFData) import Control.Exception (Exception (..)) import Control.Monad.Class.MonadTime.SI +import NoThunks.Class (NoThunks) +import NoThunks.Class.Orphans () + import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map import Data.Sequence.Strict (StrictSeq) @@ -100,7 +105,7 @@ const_MAX_TX_SIZE_DISCREPANCY = 32 -- | Compact internal transaction key used by V2 state. newtype TxKey = TxKey { unTxKey :: Int } deriving stock (Eq, Ord, Show, Generic) - deriving newtype (Enum, NFData) + deriving newtype (Enum, NFData, NoThunks) -- | State which determines when a peer that advertised a txid may -- acknowledge it. @@ -114,6 +119,8 @@ data TxOwnerAckState deriving stock (Eq, Ord, Show, Generic) deriving anyclass NFData +instance NoThunks TxOwnerAckState + -- | Per-peer advertisement state for a tx. -- newtype TxAdvertiser = TxAdvertiser { @@ -149,6 +156,8 @@ data TxAttemptState deriving stock (Eq, Ord, Show, Generic) deriving anyclass NFData +instance NoThunks TxAttemptState + -- | The current download lease for a tx body. -- -- A tx is either currently leased to a peer until a deadline or it is @@ -158,7 +167,7 @@ data TxAttemptState data TxLease peeraddr = TxLeased !peeraddr !Time | TxClaimable !Time deriving stock (Eq, Show, Generic) - deriving anyclass NFData + deriving anyclass (NFData, NoThunks) -- | Shared per-tx state. 
-- @@ -181,7 +190,7 @@ data TxEntry peeraddr = TxEntry { txAttempts :: !(Map peeraddr TxAttemptState) } deriving stock (Eq, Show, Generic) - deriving anyclass NFData + deriving anyclass (NFData, NoThunks) -- | The next peer-local action chosen by the V2 worker thread. -- @@ -211,13 +220,13 @@ data PeerAction -- responses to the original request. data RequestedTxBatch = RequestedTxBatch { -- | The set of transaction keys requested in this batch. - requestedTxBatchSet :: !IntSet + requestedTxBatchSet :: !IntSet -- | Total expected size in bytes for all tx bodies in this batch. - , requestedTxBatchSize :: !SizeInBytes + , requestedTxBatchSize :: !SizeInBytes } deriving stock (Eq, Show, Generic) - deriving anyclass NFData + deriving anyclass (NFData, NoThunks) -- | Coarse phase of a peer worker thread. -- @@ -235,6 +244,8 @@ data PeerPhase deriving stock (Eq, Ord, Show, Generic) deriving anyclass NFData +instance NoThunks PeerPhase + -- | Peer usefulness score. -- -- Lower is better. The current score is also interpreted as milliseconds of @@ -244,7 +255,7 @@ data PeerScore = PeerScore { peerScoreTs :: !Time } deriving stock (Eq, Show, Generic) - deriving anyclass NFData + deriving anyclass (NFData, NoThunks) -- | Low-cost monotonic counters for the V2 peer protocol and coordination path. -- @@ -326,7 +337,7 @@ data PeerTxLocalState tx = PeerTxLocalState { peerDownloadedTxs :: !(IntMap tx) } deriving stock (Eq, Show, Generic) - deriving anyclass NFData + deriving anyclass (NFData, NoThunks) emptyPeerTxLocalState :: PeerTxLocalState tx emptyPeerTxLocalState = PeerTxLocalState { @@ -348,7 +359,7 @@ data SharedPeerState = SharedPeerState { sharedPeerGeneration :: !Word64 } deriving stock (Eq, Show, Generic) - deriving anyclass NFData + deriving anyclass (NFData, NoThunks) -- | Shared V2 state. 
-- @@ -368,7 +379,7 @@ data SharedTxState peeraddr txid = SharedTxState { sharedGeneration :: !Word64 } deriving stock (Eq, Show, Generic) - deriving anyclass NFData + deriving anyclass (NFData, NoThunks) type RetainedTxs = IntPSQ Time () diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index b7d7ce1f260..d33f331220f 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -36,10 +36,13 @@ import Data.Sequence.Strict qualified as StrictSeq import Data.Set qualified as Set import Data.Word (Word64) +import NoThunks.Class (NoThunks, unsafeNoThunks) + + + import Ouroboros.Network.Protocol.TxSubmission2.Type import Ouroboros.Network.TxSubmission.Inbound.V2.Policy -import Ouroboros.Network.TxSubmission.Inbound.V2.Registry - (updatePeerPhase) +import Ouroboros.Network.TxSubmission.Inbound.V2.Registry (updatePeerPhase) import Ouroboros.Network.TxSubmission.Inbound.V2.State import Ouroboros.Network.TxSubmission.Inbound.V2.Types @@ -88,6 +91,16 @@ tests = , testCaseSteps "updatePeerPhase wakes competing idle advertisers when a peer leaves idle" unit_updatePeerPhase_wakesCompetingAdvertisers ] +-- +-- NoThunks invariant checks +-- + +-- | Check that a value has no thunks in its fields. 
+checkNoThunks :: NoThunks a => String -> a -> Property +checkNoThunks name val = + let result = unsafeNoThunks val + in counterexample (name ++ ": " ++ show result) $ property True + -- -- InboundState properties -- @@ -207,7 +220,7 @@ sharedTxStateInvariant strength SharedTxState { || not (Map.null txAttempts) leaseLive TxClaimable {} = False - leaseLive TxLeased {} = True + leaseLive TxLeased {} = True checkTxEntry (k, txEntry@TxEntry { txLease, txAdvertiserCount, txAttempts }) = counterexample ("bad active tx entry " ++ show k ++ ": " ++ show txEntry) $ @@ -301,6 +314,7 @@ instance Arbitrary ArbSharedTxState where | sharedState == emptySharedTxState = [] | otherwise = ArbSharedTxState <$> shrinkSharedTxState sharedState + -- Verifies that handleReceivedTxIds interns new txids, adds claimable entries -- for them, and preserves unrelated shared-state entries. prop_handleReceivedTxIds_newEntries @@ -324,6 +338,8 @@ prop_handleReceivedTxIds_newEntries (Positive peeraddr) (ArbSharedTxState shared , sharedNextTxKey sharedState' === sharedNextTxKey sharedStateBase + length txidsAndSizes , conjoin (fmap checkExistingTxId (Map.toList (sharedTxIdToKey sharedStateBase))) , conjoin (fmap checkEntry txidsAndSizes) + , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "sharedState'" sharedState' ] where sharedStateBase = ensurePeerAdvertisesTxKeys peeraddr [] sharedState0 @@ -369,6 +385,8 @@ prop_handleReceivedTxIds_knownToMempool (Positive peeraddr) txid0 txSize0 = , Map.lookup txid (sharedTxIdToKey sharedState') === Just key , IntMap.lookup (unTxKey key) (sharedKeyToTxId sharedState') === Just txid , sharedGeneration sharedState' === 1 + , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "sharedState'" sharedState' ] where txid = abs txid0 + 1 @@ -394,6 +412,8 @@ prop_handleReceivedTxIds_retainedIsLocalOnly (Positive peeraddr) txid0 txSize0 = , peerAvailableTxIds peerState' === IntMap.empty , 
toList (peerUnacknowledgedTxIds peerState') === [key] , sharedState' === sharedState0 + , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "sharedState'" sharedState' ] where txid = abs txid0 + 1 @@ -543,6 +563,8 @@ prop_handleReceivedTxs_buffersAndDropsOmitted (Positive peeraddr) txidA0 txidB0 , Map.lookup txidB (sharedTxIdToKey sharedState') === Nothing , IntMap.lookup kB (sharedKeyToTxId sharedState') === Nothing , sharedGeneration sharedState' === 1 + , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "sharedState'" sharedState' ] where txidA = abs txidA0 + 1 @@ -602,6 +624,10 @@ prop_handleReceivedTxs_dropsLateBodies (Positive peeraddr) txid0 txSize0 = } , sharedTxTable sharedStateMempool' === IntMap.empty , retainedLookup k (sharedRetainedTxs sharedStateMempool') === Just retainedUntil + , checkNoThunks "peerStateRetained" (peerStateRetained' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "peerStateMempool" (peerStateMempool' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "sharedStateRetained" sharedStateRetained' + , checkNoThunks "sharedStateMempool" sharedStateMempool' ] where txid = abs txid0 + 1 @@ -655,6 +681,8 @@ prop_handleReceivedTxs_penalizesOmittedAfterPrune (Positive peeraddr) txid0 txSi , Map.lookup txid (sharedTxIdToKey sharedState') === Nothing , IntMap.lookup k (sharedKeyToTxId sharedState') === Nothing , sharedGeneration sharedState' === 1 + , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "sharedState'" sharedState' ] where txid = abs txid0 + 1 @@ -705,6 +733,8 @@ prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected (Positive peeraddr) txid , Map.lookup txidB (sharedTxIdToKey sharedState') === Nothing , IntMap.lookup kB (sharedKeyToTxId sharedState') === Nothing , sharedGeneration sharedState' === 1 + , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "sharedState'" sharedState' 
] where txidA = abs txidA0 + 1 @@ -746,6 +776,8 @@ prop_nextPeerAction_prioritisesSubmit (Positive peeraddr) txid0 txSize0 = [ txKey === key , peerState' === peerState0 , sharedState' === sharedState0 + , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "sharedState'" sharedState' ] _ -> counterexample ("unexpected peer action: " ++ show peerAction) False where @@ -792,6 +824,8 @@ prop_nextPeerAction_claimsClaimableTx (Positive peerA0) (Positive peerB0) (Posit , peerRequestedTxs peerState' === IntSet.singleton k , txLease (lookupEntryOrFail key sharedState') === TxLeased peerA (addTime (interTxSpace defaultTxDecisionPolicy) now) + , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "sharedState'" sharedState' ] _ -> counterexample ("unexpected peer action: " ++ show peerAction) False where @@ -875,6 +909,8 @@ prop_nextPeerAction_claimsExpiredLease (Positive oldOwner0) (Positive peerA0) (P , peerRequestedTxs peerState' === IntSet.singleton k , txLease (lookupEntryOrFail key sharedState') === TxLeased peerA (addTime (interTxSpace defaultTxDecisionPolicy) now) + , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "sharedState'" sharedState' ] _ -> counterexample ("unexpected peer action: " ++ show peerAction) False where @@ -920,6 +956,8 @@ prop_nextPeerAction_requestsOversizedFirstTx (Positive peeraddr) txid0 (Positive , peerRequestedTxsSize peerState' === txSize , txLease (lookupEntryOrFail key sharedState') === TxLeased peeraddr (addTime (interTxSpace policy) now) + , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "sharedState'" sharedState' ] _ -> counterexample ("unexpected peer action: " ++ show peerAction) False where @@ -1195,6 +1233,8 @@ prop_nextPeerAction_nonOwnerWaitsUntilResolved (Positive owner0) (Positive peera conjoin [ txIdsToAcknowledge === 1 , peerUnacknowledgedTxIds resolvedPeerState' === 
StrictSeq.empty + , checkNoThunks "resolvedPeerState'" (resolvedPeerState' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "resolvedSharedState'" resolvedSharedState' ] _ -> counterexample ("unexpected resolved action: " ++ show resolvedAction) False ] @@ -1233,8 +1273,10 @@ prop_nextPeerAction_nonOwnerWaitsUntilResolved (Positive owner0) (Positive peera [ peerUnacknowledgedTxIds unresolvedPeerState' === peerUnacknowledgedTxIds peerState0 , txAdvertiserCount (lookupEntryOrFail key unresolvedSharedState') === txAdvertiserCount (lookupEntryOrFail key unresolvedSharedState) + , checkNoThunks "unresolvedPeerState'" (unresolvedPeerState' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "unresolvedSharedState'" unresolvedSharedState' ] - (resolvedAction, resolvedPeerState', _) = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 resolvedSharedState + (resolvedAction, resolvedPeerState', resolvedSharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 resolvedSharedState -- Verifies that nextPeerActionPipelined does nothing when it can only -- acknowledge txids and cannot request new ones in the same step. @@ -1249,6 +1291,8 @@ prop_nextPeerActionPipelined_requiresAckAndReq (Positive peeraddr) txid0 _txSize conjoin [ peerUnacknowledgedTxIds peerState' === peerUnacknowledgedTxIds peerState0 , sharedState' === sharedState0 + , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "sharedState'" sharedState' ] _ -> counterexample ("unexpected pipelined action: " ++ show peerAction) False where @@ -1832,8 +1876,8 @@ data PeerSeed = PeerSeed { } data PeerDerivedUsage = PeerDerivedUsage { - peerHasSubmitting :: !Bool - , peerHasRequestedTxs :: !Bool + peerHasSubmitting :: !Bool + , peerHasRequestedTxs :: !Bool } -- Generate a shared tx state with distinct active and retained entries. 
@@ -2062,7 +2106,7 @@ entryPeers (_, txAdvertisers, TxEntry { txLease, txAttempts }) = leaseOwner = case txLease of TxLeased owner _ -> [owner] - TxClaimable _ -> [] + TxClaimable _ -> [] -- Shrink shared state by dropping active txs, retained txs, or unused peers. shrinkSharedTxState @@ -2171,7 +2215,7 @@ genNonEmptySublist xs = do ys <- sublistOf xs case ys of [] -> (: []) <$> elements xs - _ -> pure ys + _ -> pure ys -- Generate distinct positive ints from a bounded shuffled range. genDistinctPositiveInts :: Int -> Gen [Int] @@ -2283,21 +2327,21 @@ lookupKeyOrFail :: TxId -> SharedTxState PeerAddr TxId -> TxKey lookupKeyOrFail txid st = case lookupTxKey txid st of Just txKey -> txKey - Nothing -> error "TxLogic.lookupKeyOrFail: missing tx key" + Nothing -> error "TxLogic.lookupKeyOrFail: missing tx key" -- Look up an active tx entry and fail fast in test setup code. lookupEntryOrFail :: TxKey -> SharedTxState PeerAddr TxId -> TxEntry PeerAddr lookupEntryOrFail (TxKey k) st = case IntMap.lookup k (sharedTxTable st) of Just txEntry -> txEntry - Nothing -> error "TxLogic.lookupEntryOrFail: missing tx entry" + Nothing -> error "TxLogic.lookupEntryOrFail: missing tx entry" -- Look up a shared peer and fail fast in test setup code. lookupPeerOrFail :: PeerAddr -> SharedTxState PeerAddr TxId -> SharedPeerState lookupPeerOrFail peeraddr st = case Map.lookup peeraddr (sharedPeers st) of Just sharedPeerState -> sharedPeerState - Nothing -> error "TxLogic.lookupPeerOrFail: missing peer" + Nothing -> error "TxLogic.lookupPeerOrFail: missing peer" -- Drop duplicate txids while keeping the first proposed size. 
dedupeBatch :: [(TxId, SizeInBytes)] -> [(TxId, SizeInBytes)] From 1e85081c09f74aff557eafdd16147e3c1465808c Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Tue, 14 Apr 2026 08:57:13 +0200 Subject: [PATCH 23/67] fixup: fix bug ack bug If there is at least one TX outstanding don't ack the final txid in the window --- .../Network/TxSubmission/Inbound/V2/State.hs | 11 +++- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 54 +++++++++++++++++++ 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 4a5635b3aed..82a3c6ba3f1 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -385,9 +385,18 @@ pickRequestTxIdsAction txIdRequestMode ctx@PeerActionContext { pacPolicy, pacPee numOfUnacked = StrictSeq.length (peerUnacknowledgedTxIds pacPeerState) numOfRequested = fromIntegral (peerRequestedTxIds pacPeerState) :: Int + hasOutstandingBodyReplies = + not (StrictSeq.null (peerRequestedTxBatches pacPeerState)) + keepOneUnackedForPipelinedRequest = + txIdRequestMode == AllowPipelinedTxIdRequests + && (numOfRequested > 0 || hasOutstandingBodyReplies) numOfAcked0 = StrictSeq.length ackablePrefix numOfAcked - | numOfRequested > 0 = min numOfAcked0 (max 0 (numOfUnacked - 1)) + -- A pipelined txid request becomes a non-blocking protocol message + -- while any txid or body reply is still in flight. The outbound side + -- requires at least one txid to remain unacknowledged in that case. 
+ | keepOneUnackedForPipelinedRequest = + min numOfAcked0 (max 0 (numOfUnacked - 1)) | otherwise = numOfAcked0 acknowledgedTxIdsSeq = StrictSeq.take numOfAcked ackablePrefix diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index d33f331220f..64adc387ffe 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -78,6 +78,7 @@ tests = , testProperty "nextPeerAction keeps non-owner txids unacked until resolved" prop_nextPeerAction_nonOwnerWaitsUntilResolved , testProperty "nextPeerActionPipelined suppresses ack-only txid requests" prop_nextPeerActionPipelined_requiresAckAndReq , testProperty "nextPeerActionPipelined requests txids when it can ack and request" prop_nextPeerActionPipelined_requestsTxIds + , testCaseSteps "nextPeerActionPipelined keeps one txid unacked while body replies are in flight" unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply , testProperty "nextPeerActionPipelined opens a second outstanding body batch" prop_nextPeerActionPipelined_secondBodyBatch , testProperty "nextPeerActionPipelined does not open a third outstanding body batch" prop_nextPeerActionPipelined_noThirdBodyBatch , testProperty "nextPeerAction prunes expired retained txs" prop_nextPeerAction_prunesExpiredRetained @@ -1351,6 +1352,59 @@ prop_nextPeerActionPipelined_requestsTxIds (Positive peeraddr) txid0 _txSize0 = } (peerAction, peerState', sharedState') = nextPeerActionPipelined now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 +unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply + :: (String -> IO ()) + -> Assertion +unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply step = do + step "Run nextPeerActionPipelined with three ackable txids and one outstanding body batch" + case peerAction of + 
PeerRequestTxIds txIdsToAcknowledge txIdsToReq -> do + step "Assert pipelined txid requests keep one txid unacked while a body reply is still in flight" + txIdsToAcknowledge @?= 2 + assertBool ("expected positive txIdsToReq, got " ++ show txIdsToReq) (txIdsToReq > 0) + peerUnacknowledgedTxIds peerState' @?= StrictSeq.singleton keyC + peerRequestedTxIds peerState' @?= txIdsToReq + peerRequestedTxBatches peerState' @?= peerRequestedTxBatches peerState0 + sharedState' @?= sharedState0 + _ -> + assertFailure ("unexpected pipelined action: " ++ show peerAction) + where + peeraddr :: PeerAddr + peeraddr = 7 + txidA, txidB, txidC :: TxId + txidA = 1 + txidB = 2 + txidC = 3 + keyA = TxKey 0 + keyB = TxKey 1 + keyC = TxKey 2 + requestedBatch = mkRequestedTxBatch [keyA] 11 + peerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.fromList [keyA, keyB, keyC] + , peerRequestedTxs = IntSet.singleton (unTxKey keyA) + , peerRequestedTxBatches = StrictSeq.singleton requestedBatch + , peerRequestedTxsSize = requestedTxBatchSize requestedBatch + } + sharedState0 = emptySharedTxState + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + , sharedRetainedTxs = + retainedFromList + [ (unTxKey keyA, addTime 17 now) + , (unTxKey keyB, addTime 17 now) + , (unTxKey keyC, addTime 17 now) + ] + , sharedTxIdToKey = Map.fromList [(txidA, keyA), (txidB, keyB), (txidC, keyC)] + , sharedKeyToTxId = + IntMap.fromList + [ (unTxKey keyA, txidA) + , (unTxKey keyB, txidB) + , (unTxKey keyC, txidC) + ] + , sharedNextTxKey = 3 + } + (peerAction, peerState', sharedState') = + nextPeerActionPipelined now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + -- Verifies that nextPeerActionPipelined opens a second outstanding body -- batch when another downloadable tx is available. 
prop_nextPeerActionPipelined_secondBodyBatch From 272a14e05ff73510cee264cb3fd20b0737c97138 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Tue, 14 Apr 2026 13:26:34 +0200 Subject: [PATCH 24/67] fixup: remove TxNoAttempt We don't track peers that don't have an ongoing attempt any longer so TxNoAttempt isn't needed. --- .../Network/TxSubmission/Inbound/V2/State.hs | 16 +++++----------- .../Network/TxSubmission/Inbound/V2/Types.hs | 7 +------ .../Ouroboros/Network/TxSubmission/TxLogic.hs | 2 -- 3 files changed, 6 insertions(+), 19 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 82a3c6ba3f1..3ea9ba596f6 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -475,19 +475,13 @@ txSelectable PeerActionContext { pacNow, pacPeerAddr, pacPolicy, pacSharedPeerSt txOwnedByPeer TxEntry { txLease = TxLeased owner _ } = owner == pacPeerAddr txOwnedByPeer TxEntry { txLease = TxClaimable _ } = False - txPeerHasAttempt = - case txAttemptOfPeer pacPeerAddr txEntry of - Just TxNoAttempt -> False - Just _ -> True - Nothing -> False + txPeerHasAttempt = Map.member pacPeerAddr (txAttempts txEntry) + -- Safe to use Map.size here: by the time this guard is reached, + -- txSubmittingAnywhere has already returned False, so the map contains + -- only TxDownloading and TxBuffered entries. txActiveAttemptCount :: TxEntry peeraddr -> Int - txActiveAttemptCount TxEntry { txAttempts } = - length - [ () - | attempt <- Map.elems txAttempts - , attempt == TxDownloading || attempt == TxBuffered - ] + txActiveAttemptCount TxEntry { txAttempts } = Map.size txAttempts -- | Extract the peer's TxAttemptState for the TX entry, if it exists. 
diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index 215d20f6b90..08a4913af03 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -136,12 +136,7 @@ newtype TxAdvertiser = TxAdvertiser { -- needs to know whether each advertiser is currently downloading, has a -- buffered body ready, or is submitting it to the mempool. data TxAttemptState - = -- | The peer has advertised this transaction but has not yet started - -- downloading the tx body. This is the initial state when a peer first - -- advertises a txid. - TxNoAttempt - - | -- | The peer is currently downloading the tx body from another peer. + = -- | The peer is currently downloading the tx body from another peer. -- The tx body is being fetched and has not yet been received. TxDownloading diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 64adc387ffe..4e0b15376e6 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -2131,8 +2131,6 @@ accumulatePeerUsage acc (_, _, TxEntry { txAttempts }) = updatePeerUsage peeraddr True False acc' TxBuffered -> updatePeerUsage peeraddr False False acc' - TxNoAttempt -> - acc' -- Merge one peer's submitting and inflight usage into the accumulator. updatePeerUsage From 8e5e151cded0474b2cfcaaddfaf5c46ebea8fcb7 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Tue, 14 Apr 2026 13:34:38 +0200 Subject: [PATCH 25/67] fixup: guard against currentTime < peerScoreTs currentTime < peerScoreTs shouldn't happen with getMonotinicTime but better be safe. 
--- .../lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 3ea9ba596f6..2e5411151ea 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -523,7 +523,7 @@ currentPeerScore :: TxDecisionPolicy currentPeerScore TxDecisionPolicy { scoreRate } currentTime PeerScore { peerScoreValue, peerScoreTs } | peerScoreValue == 0 = 0 - | currentTime == peerScoreTs = peerScoreValue + | currentTime <= peerScoreTs = peerScoreValue | otherwise = max 0 $ peerScoreValue - realToFrac (diffTime currentTime peerScoreTs) * scoreRate peerClaimDelay :: TxDecisionPolicy From d92e659366dc6b664867e01ec9a2aa8a98754562 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Tue, 14 Apr 2026 13:52:25 +0200 Subject: [PATCH 26/67] fixup: use disjoin instead of foldr --- .../lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 2e5411151ea..c8d943b6079 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -714,8 +714,8 @@ peerAdvertisesAnyTxKey :: IntSet.IntSet -> SharedPeerState -> Bool -peerAdvertisesAnyTxKey targetKeys sharedPeerState = - IntSet.foldr (\k acc -> peerAdvertisesTxKey k sharedPeerState || acc) False targetKeys +peerAdvertisesAnyTxKey targetKeys SharedPeerState { sharedPeerAdvertisedTxKeys } = + not (IntSet.disjoint targetKeys sharedPeerAdvertisedTxKeys) advertisingPeersForTxKeysExcept :: Ord peeraddr From 
ff59fcec15419d6affb81c8b13d8a993988023cf Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Tue, 14 Apr 2026 14:15:17 +0200 Subject: [PATCH 27/67] fixup: short circuit 0 score calculations When the score is already 0 there is no need for forther floating point ops. --- .../Network/TxSubmission/Inbound/V2/Registry.hs | 11 +++++++---- .../Network/TxSubmission/Inbound/V2/State.hs | 8 +++++--- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index e1d60648f0b..2fe8cacfa1d 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -552,10 +552,13 @@ updatePeerPhase now policy peeraddr peerPhaseNew else st _ -> st -- TODO error? where - normalizePeerScore ps@PeerScore { peerScoreValue, peerScoreTs } = - let !drain = realToFrac (diffTime now peerScoreTs) * scoreRate policy - !drained = max 0 (peerScoreValue - drain) - in ps { peerScoreValue = drained, peerScoreTs = now } + normalizePeerScore ps@PeerScore { peerScoreValue } + | peerScoreValue == 0 = ps + | otherwise = + let PeerScore { peerScoreTs } = ps + !drain = realToFrac (diffTime now peerScoreTs) * scoreRate policy + !drained = max 0 (peerScoreValue - drain) + in ps { peerScoreValue = drained, peerScoreTs = now } phaseWakePeers peerPhaseOld | peerPhaseOld /= PeerIdle diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index c8d943b6079..92e4fabc123 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -530,9 +530,11 @@ peerClaimDelay :: TxDecisionPolicy -> Time -> PeerScore -> DiffTime -peerClaimDelay policy currentTime 
= - -- Delay contribution in milliseconds is peerScore / 20, then converted to seconds. - realToFrac . (/ 20000) . currentPeerScore policy currentTime +peerClaimDelay policy currentTime peerScore + | peerScoreValue peerScore == 0 = 0 + | otherwise = + -- Delay contribution in milliseconds is peerScore / 20, then converted to seconds. + realToFrac . (/ 20000) $ currentPeerScore policy currentTime peerScore txClaimReadyAt :: DiffTime -> TxEntry peeraddr -> Time txClaimReadyAt claimDelay TxEntry { txLease } = From 88acb57a1bc40e81b21c3f506586329e2337328a Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Thu, 16 Apr 2026 17:54:30 +0200 Subject: [PATCH 28/67] fixup: moar counters --- .../Network/TxSubmission/Inbound/V2.hs | 53 +++++---- .../TxSubmission/Inbound/V2/Registry.hs | 73 ++++++++++--- .../Network/TxSubmission/Inbound/V2/State.hs | 70 ++++++------ .../Network/TxSubmission/Inbound/V2/Types.hs | 103 +++++++++++++++--- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 13 ++- .../Ouroboros/Network/Tracing/TxSubmission.hs | 21 ++++ 6 files changed, 235 insertions(+), 98 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs index 16a6efab2f2..95a7170e825 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs @@ -128,17 +128,25 @@ txSubmissionInboundV2 serverIdle :: StatefulM (PeerTxLocalState tx) Z txid tx m serverIdle = StatefulM $ \peerState -> do now <- getMonotonicTime - (peerAction, peerState') <- runNextPeerAction now peerState + -- When the pipeline fully drains, emit the body-download episode + -- duration (covers all overlapping body and txid pipelined requests). 
+ peerState' <- case peerDownloadStartTime peerState of + Nothing -> pure peerState + Just startTime -> do + addCounters mempty { txPipelineWaitMs = + diffTimeToMillis (now `diffTime` startTime) } + pure $ peerState { peerDownloadStartTime = Nothing } + (peerAction, peerState'') <- runNextPeerAction now peerState' case peerAction of PeerDoNothing generation mDelay -> do awaitSharedChange generation mDelay - continueWithStateM serverIdle peerState' + continueWithStateM serverIdle peerState'' PeerSubmitTxs txKeys -> - continueWithStateM (submitBufferedTxs txKeys serverIdle) peerState' + continueWithStateM (submitBufferedTxs txKeys serverIdle) peerState'' PeerRequestTxs txKeys -> - continueWithStateM (requestTxBodies Zero txKeys) peerState' - PeerRequestTxIds txIdsToAck txIdsToReq -> - continueWithStateM (serverReqTxIds Zero txIdsToAck txIdsToReq) peerState' + continueWithStateM (requestTxBodies Zero txKeys) peerState'' + PeerRequestTxIds _flavour txIdsToAck txIdsToReq -> + continueWithStateM (serverReqTxIds Zero txIdsToAck txIdsToReq) peerState'' -- | Submit buffered transaction bodies to the mempool. submitBufferedTxs :: forall (n :: N). @@ -174,6 +182,7 @@ txSubmissionInboundV2 rejectedCount = length rejectedForTrace delta = end `diffTime` start + addCounters mempty { txSubmissionWaitMs = diffTimeToMillis delta } score <- countRejectedTxs end rejectedCount traceWith tracer $ TraceTxSubmissionProcessed ProcessedTxCount { @@ -197,10 +206,16 @@ txSubmissionInboundV2 requestTxBodies n txKeys = StatefulM $ \peerState -> do txsToRequest <- resolveTxRequest peerState txKeys traceWith tracer (TraceTxInboundRequestTxs (Map.keys txsToRequest)) - addCounters mempty { txMessagesSent = 1 - , txsRequested = fromIntegral (Map.size txsToRequest) } + + -- Record the start of the download episode on the first outstanding + -- body request. Subsequent pipelined requests leave the start time + -- unchanged so we measure from first-send to last-receive. 
+ sendTime <- getMonotonicTime + let peerState' = case peerDownloadStartTime peerState of + Nothing -> peerState { peerDownloadStartTime = Just sendTime } + Just _ -> peerState pure $ SendMsgRequestTxsPipelined txsToRequest - (continueWithStateM (continueAfterBodyRequests (Succ n)) peerState) + (continueWithStateM (continueAfterBodyRequests (Succ n)) peerState') -- Continue processing after receiving replies from the peer in pipelined mode. continueAfterReplies :: forall (n :: N). @@ -215,7 +230,7 @@ txSubmissionInboundV2 continueWithStateM (submitBufferedTxs txKeys (continueAfterReplies n)) peerState' PeerRequestTxs txKeys -> continueWithStateM (requestTxBodies n txKeys) peerState' - PeerRequestTxIds txIdsToAck txIdsToReq -> + PeerRequestTxIds _flavour txIdsToAck txIdsToReq -> continueWithStateM (serverReqTxIds n txIdsToAck txIdsToReq) peerState' PeerDoNothing {} -> pure $ continueWithState (handleReplies n) peerState' @@ -232,7 +247,7 @@ txSubmissionInboundV2 continueWithStateM (submitBufferedTxs txKeys (continueAfterReplies n)) peerState' PeerRequestTxs txKeys -> continueWithStateM (requestTxBodies n txKeys) peerState' - PeerRequestTxIds txIdsToAck txIdsToReq -> + PeerRequestTxIds _flavour txIdsToAck txIdsToReq -> continueWithStateM (serverReqTxIds n txIdsToAck txIdsToReq) peerState' PeerDoNothing {} -> pure $ continueWithState (handleReplies n) peerState' @@ -252,22 +267,20 @@ txSubmissionInboundV2 pure $ continueWithState (handleReplies n) peerState -- Non-pipelined request, may send a blocking request - serverReqTxIds Zero txIdsToAck txIdsToReq = StatefulM $ \peerState -> do - addCounters mempty { txIdMessagesSent = 1 - , txIdsRequested = fromIntegral txIdsToReq } + serverReqTxIds Zero txIdsToAck txIdsToReq = StatefulM $ \peerState -> if StrictSeq.null (peerUnacknowledgedTxIds peerState) - then + then do + sendTime <- getMonotonicTime pure $ SendMsgRequestTxIdsBlocking txIdsToAck txIdsToReq (traceWith tracer TraceTxInboundTerminated) (\txids -> do now <- 
getMonotonicTime + addCounters mempty { txIdBlockingWaitMs = diffTimeToMillis (now `diffTime` sendTime) } let txids' = NonEmpty.toList txids unless (length txids' <= fromIntegral txIdsToReq) $ throwIO ProtocolErrorTxIdsNotRequested - addCounters mempty { txIdRepliesReceived = 1 - , txIdsReceived = fromIntegral (length txids') } peerState' <- applyReceivedTxIds now txIdsToReq txids' peerState continueWithStateM serverIdle peerState') else @@ -278,9 +291,7 @@ txSubmissionInboundV2 -- Pipelined request at depth > 0. Sends a pipelined message and continues -- to @handleReplies@. - serverReqTxIds n@Succ{} txIdsToAck txIdsToReq = StatefulM $ \peerState -> do - addCounters mempty { txIdMessagesSent = 1 - , txIdsRequested = fromIntegral txIdsToReq } + serverReqTxIds n@Succ{} txIdsToAck txIdsToReq = StatefulM $ \peerState -> pure $ SendMsgRequestTxIdsPipelined txIdsToAck txIdsToReq @@ -304,8 +315,6 @@ txSubmissionInboundV2 CollectTxIds txIdsToReq txids -> do unless (length txids <= fromIntegral txIdsToReq) $ throwIO ProtocolErrorTxIdsNotRequested - addCounters mempty { txIdRepliesReceived = 1 - , txIdsReceived = fromIntegral (length txids) } now <- getMonotonicTime peerState' <- applyReceivedTxIds now txIdsToReq txids peerState continueWithStateM (continueAfterReplies n) peerState' diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index 2fe8cacfa1d..79664f0d22f 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -159,14 +159,14 @@ withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar atomically $ modifyTVar sharedStateVar (registerPeer now) pure PeerTxAPI { awaitSharedChange = awaitSharedChangeImp sharedStateVar peeraddr - , runNextPeerAction = runNextPeerActionImp policy sharedStateVar peeraddr + , 
runNextPeerAction = runNextPeerActionImp policy sharedStateVar countersVar peeraddr , runNextPeerActionPipelined = runNextPeerActionPipelinedImp policy sharedStateVar - peeraddr + countersVar peeraddr , applyReceivedTxIds = applyReceivedTxIdsImp policy mempoolGetSnapshot sharedStateVar - peeraddr + countersVar peeraddr , applyReceivedTxs = applyReceivedTxsImp policy mempoolGetSnapshot sharedStateVar countersVar peeraddr - , applySubmittedTxs = applySubmittedTxsImp policy sharedStateVar peeraddr + , applySubmittedTxs = applySubmittedTxsImp policy sharedStateVar countersVar peeraddr , countRejectedTxs = countRejectedTxsImp policy sharedStateVar peeraddr , resolveTxRequest = resolveTxRequestImp sharedStateVar , resolveBufferedTxs = resolveBufferedTxsImp sharedStateVar @@ -292,6 +292,31 @@ writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' | sharedGeneration sharedState' == sharedGeneration0 = pure () | otherwise = writeTVar sharedStateVar sharedState' +-- | Update the counters for the action chosen by the peer scheduler. +-- +updateCountersForAction :: MonadSTM m + => TxSubmissionCountersVar m + -> PeerAction + -> STM m () +updateCountersForAction countersVar peerAction = + case peerAction of + PeerRequestTxIds flavour txIdsToAck txIdsToReq + | txIdsToAck /= 0 || txIdsToReq /= 0 -> + modifyTVar countersVar (<> mempty + { txIdMessagesSent = 1 + , txIdsRequested = fromIntegral txIdsToReq + , txIdBlockingReqsSent = case flavour of + TxIdsBlockingReq -> 1 + TxIdsPipelinedReq -> 0 + , txIdPipelinedReqsSent = case flavour of + TxIdsBlockingReq -> 0 + TxIdsPipelinedReq -> 1 + }) + PeerRequestTxs txKeys -> + modifyTVar countersVar (<> mempty { txMessagesSent = 1 + , txsRequested = fromIntegral (length txKeys) }) + _ -> pure () + -- | Compute the next action for this peer in non-pipelined mode. 
-- -- Returns the selected 'PeerAction', an updated peer-local state, and applies @@ -302,11 +327,12 @@ runNextPeerActionImp :: ( MonadSTM m , Ord txid ) => TxDecisionPolicy -> SharedTxStateVar m peeraddr txid + -> TxSubmissionCountersVar m -> peeraddr -> Time -> PeerTxLocalState tx -> m (PeerAction, PeerTxLocalState tx) -runNextPeerActionImp policy sharedStateVar peeraddr now peerState = atomically $ do +runNextPeerActionImp policy sharedStateVar countersVar peeraddr now peerState = atomically $ do sharedState <- readTVar sharedStateVar let sharedGeneration0 = sharedGeneration sharedState (peerAction, peerState', sharedState') = State.nextPeerAction now policy peeraddr @@ -314,6 +340,7 @@ runNextPeerActionImp policy sharedStateVar peeraddr now peerState = atomically $ sharedState'' = updatePeerPhase now policy peeraddr (peerPhaseForActionIdle peerAction) sharedState' writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState'' + updateCountersForAction countersVar peerAction return (peerAction, peerState') -- | Compute the next action for this peer in pipelined mode. 
@@ -326,20 +353,23 @@ runNextPeerActionPipelinedImp :: ( MonadSTM m , Ord txid ) => TxDecisionPolicy -> SharedTxStateVar m peeraddr txid + -> TxSubmissionCountersVar m -> peeraddr -> Time -> PeerTxLocalState tx -> m (PeerAction, PeerTxLocalState tx) -runNextPeerActionPipelinedImp policy sharedStateVar peeraddr now peerState = atomically $ do - sharedState <- readTVar sharedStateVar - let sharedGeneration0 = sharedGeneration sharedState - (peerAction, peerState', sharedState') = State.nextPeerActionPipelined now policy - peeraddr peerState sharedState - sharedState'' = updatePeerPhase now policy peeraddr - (peerPhaseForActionPipelined peeraddr peerAction sharedState') - sharedState' - writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState'' - return (peerAction, peerState') +runNextPeerActionPipelinedImp policy sharedStateVar countersVar peeraddr now peerState = + atomically $ do + sharedState <- readTVar sharedStateVar + let sharedGeneration0 = sharedGeneration sharedState + (peerAction, peerState', sharedState') = State.nextPeerActionPipelined now policy + peeraddr peerState sharedState + sharedState'' = updatePeerPhase now policy peeraddr + (peerPhaseForActionPipelined peeraddr peerAction sharedState') + sharedState' + writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState'' + updateCountersForAction countersVar peerAction + return (peerAction, peerState') -- | Process a batch of txids received from this peer. 
-- @@ -353,20 +383,23 @@ applyReceivedTxIdsImp :: ( MonadSTM m => TxDecisionPolicy -> STM m (MempoolSnapshot txid tx idx) -> SharedTxStateVar m peeraddr txid + -> TxSubmissionCountersVar m -> peeraddr -> Time -> NumTxIdsToReq -> [(txid, SizeInBytes)] -> PeerTxLocalState tx -> m (PeerTxLocalState tx) -applyReceivedTxIdsImp policy mempoolGetSnapshot sharedStateVar peeraddr now txIdsToReq - txidsAndSizes peerState = atomically $ do +applyReceivedTxIdsImp policy mempoolGetSnapshot sharedStateVar countersVar peeraddr now + txIdsToReq txidsAndSizes peerState = atomically $ do MempoolSnapshot { mempoolHasTx } <- mempoolGetSnapshot sharedState <- readTVar sharedStateVar let sharedGeneration0 = sharedGeneration sharedState (peerState', sharedState') = State.handleReceivedTxIds mempoolHasTx now policy peeraddr txIdsToReq txidsAndSizes peerState sharedState writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' + modifyTVar countersVar (<> mempty { txIdRepliesReceived = 1 + , txIdsReceived = fromIntegral (length txidsAndSizes) }) return peerState' -- | Process a batch of tx bodies received from this peer. 
@@ -414,19 +447,23 @@ applySubmittedTxsImp :: ( MonadSTM m , Ord txid ) => TxDecisionPolicy -> SharedTxStateVar m peeraddr txid + -> TxSubmissionCountersVar m -> peeraddr -> Time -> [TxKey] -> [TxKey] -> PeerTxLocalState tx -> m (PeerTxLocalState tx) -applySubmittedTxsImp policy sharedStateVar peeraddr now acceptedTxs rejectedTxs peerState = +applySubmittedTxsImp policy sharedStateVar countersVar peeraddr now acceptedTxs rejectedTxs + peerState = atomically $ do sharedState <- readTVar sharedStateVar let sharedGeneration0 = sharedGeneration sharedState let (peerState', sharedState') = State.handleSubmittedTxs now policy peeraddr acceptedTxs rejectedTxs peerState sharedState writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' + modifyTVar countersVar (<> mempty { txsAccepted = fromIntegral (length acceptedTxs) + , txsRejected = fromIntegral (length rejectedTxs) }) return peerState' -- | Update the peer's rejection score based on the number of txs rejected diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 92e4fabc123..a38230c2e1c 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -35,25 +35,25 @@ data TxIdRequestMode = AllowAnyTxIdRequests | AllowPipelinedTxIdRequests -- data PeerActionContext peeraddr txid tx = PeerActionContext { -- | Current time used for lease expiry and score decay decisions. - pacNow :: !Time, + pacNow :: !Time, -- | Decision policy that governs request, retry, and scoring limits. - pacPolicy :: !TxDecisionPolicy, + pacPolicy :: !TxDecisionPolicy, -- | Address of the peer whose next action is being chosen. - pacPeerAddr :: !peeraddr, + pacPeerAddr :: !peeraddr, -- | Current peer-local state after local pruning has been applied. 
- pacPeerState :: !(PeerTxLocalState tx), + pacPeerState :: !(PeerTxLocalState tx), -- | Shared tx-submission state after shared pruning has been applied. - pacSharedState :: !(SharedTxState peeraddr txid), + pacSharedState :: !(SharedTxState peeraddr txid), -- | This peer's shared state after pruning. pacSharedPeerState :: !SharedPeerState, -- | Score-derived delay this peer must wait after a tx becomes claimable. - pacClaimDelay :: !DiffTime + pacClaimDelay :: !DiffTime } data PeerActionChoice peeraddr = ChooseSubmit ![TxKey] | ChooseRequestTxs ![TxKey] !SizeInBytes !(IntMap.IntMap (TxEntry peeraddr)) - | ChooseRequestTxIds ![TxKey] !NumTxIdsToAck !NumTxIdsToReq !(StrictSeq.StrictSeq TxKey) + | ChooseRequestTxIds !TxIdsReqFlavour ![TxKey] !NumTxIdsToAck !NumTxIdsToReq !(StrictSeq.StrictSeq TxKey) | ChooseDoNothing !Word64 !(Maybe DiffTime) -- | Build a precomputed context for selecting the next action for a peer. @@ -156,7 +156,11 @@ pickPeerActionChoice txIdRequestMode ctx -- Pick TXids to ack and/or request more TXids. 
| Just (acknowledgedTxIds, txIdsToAcknowledge, txIdsToRequest, unacknowledgedTxIds') <- pickRequestTxIdsAction txIdRequestMode ctx = - ChooseRequestTxIds acknowledgedTxIds txIdsToAcknowledge txIdsToRequest unacknowledgedTxIds' + let flavour + | txIdRequestMode == AllowAnyTxIdRequests + , StrictSeq.null unacknowledgedTxIds' = TxIdsBlockingReq + | otherwise = TxIdsPipelinedReq + in ChooseRequestTxIds flavour acknowledgedTxIds txIdsToAcknowledge txIdsToRequest unacknowledgedTxIds' -- Do nothing | otherwise = ChooseDoNothing (peerGenerationOf (pacPeerAddr ctx) (pacSharedState ctx)) (nextWakeDelay ctx) @@ -172,9 +176,9 @@ applyPeerActionChoice ctx choice = applySubmitChoice ctx txsToSubmit ChooseRequestTxs txsToRequest txsToRequestSize txTable' -> applyRequestTxsChoice ctx txsToRequest txsToRequestSize txTable' - ChooseRequestTxIds acknowledgedTxIds txIdsToAcknowledge txIdsToRequest + ChooseRequestTxIds flavour acknowledgedTxIds txIdsToAcknowledge txIdsToRequest unacknowledgedTxIds' -> - applyRequestTxIdsChoice ctx acknowledgedTxIds txIdsToAcknowledge txIdsToRequest + applyRequestTxIdsChoice ctx flavour acknowledgedTxIds txIdsToAcknowledge txIdsToRequest unacknowledgedTxIds' ChooseDoNothing generation wakeDelay -> applyDoNothingChoice ctx generation wakeDelay @@ -227,13 +231,14 @@ applyRequestTxsChoice ctx txsToRequest txsToRequestSize txTable = applyRequestTxIdsChoice :: (Ord peeraddr, Ord txid) => PeerActionContext peeraddr txid tx + -> TxIdsReqFlavour -> [TxKey] -> NumTxIdsToAck -> NumTxIdsToReq -> StrictSeq.StrictSeq TxKey -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) -applyRequestTxIdsChoice ctx acknowledgedTxIds txIdsToAcknowledge txIdsToRequest unacknowledgedTxIds' = - ( PeerRequestTxIds txIdsToAcknowledge txIdsToRequest +applyRequestTxIdsChoice ctx flavour acknowledgedTxIds txIdsToAcknowledge txIdsToRequest unacknowledgedTxIds' = + ( PeerRequestTxIds flavour txIdsToAcknowledge txIdsToRequest , peerState'' , sharedState'' ) @@ -579,9 +584,9 @@ 
acknowledgeTxIds :: (Ord peeraddr, Ord txid) -> SharedTxState peeraddr txid acknowledgeTxIds _ [] st = st acknowledgeTxIds peeraddr acknowledgedTxIds st = - case IntSet.null removedKeys of - True -> st - False -> + if IntSet.null removedKeys + then st + else let st'' = IntSet.foldl' acknowledgeOne st' removedKeys in st'' { sharedGeneration = sharedGeneration st + 1 } where @@ -619,25 +624,20 @@ txIdAckable PeerActionContext { pacPeerAddr, pacPeerState, pacSharedPeerState, p | otherwise = case IntMap.lookup k (sharedTxTable pacSharedState) of Just txEntry@TxEntry { txLease, txAttempts } -> - if IntSet.member k (sharedPeerAdvertisedTxKeys pacSharedPeerState) - then - let ackWhenBuffered = - case txLease of - TxLeased owner _ -> owner == pacPeerAddr || Map.member pacPeerAddr txAttempts - TxClaimable _ -> Map.member pacPeerAddr txAttempts - in - if ackWhenBuffered - then - -- Ack the txid if we downloaded it and no other - -- peer is in the process of submitting it to the - -- mempool. - IntMap.member k (peerDownloadedTxs pacPeerState) - && not (txBufferedByPeer pacPeerAddr txEntry - && txSubmittingByOther pacPeerAddr txEntry) - else - False -- This becomes ackable once the tx is retained or later pruned. - else - True -- Safe late ack after this peer was pruned from the shared entry. + not (IntSet.member k (sharedPeerAdvertisedTxKeys pacSharedPeerState)) + || + let ackWhenBuffered = + case txLease of + TxLeased owner _ -> owner == pacPeerAddr || Map.member pacPeerAddr txAttempts + TxClaimable _ -> Map.member pacPeerAddr txAttempts + in + -- Ack the txid if we downloaded it and no other + -- peer is in the process of submitting it to the + -- mempool. + ackWhenBuffered + && IntMap.member k (peerDownloadedTxs pacPeerState) + && not (txBufferedByPeer pacPeerAddr txEntry + && txSubmittingByOther pacPeerAddr txEntry) Nothing -> True -- Safe late ack after the resolved tx was pruned from shared state. -- | Remove one transaction entry from all shared state maps by key. 
@@ -705,7 +705,7 @@ activeTxLive TxEntry { txLease, txAdvertiserCount, txAttempts } = || not (Map.null txAttempts) where leaseLive TxClaimable {} = False - leaseLive TxLeased {} = True + leaseLive TxLeased {} = True peerAdvertisesTxKey :: Int -> SharedPeerState -> Bool diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index 08a4913af03..b6521ae4124 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -52,6 +52,7 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.Types , TxAttemptState (..) , TxLease (..) , TxEntry (..) + , TxIdsReqFlavour (..) , PeerAction (..) , PeerPhase (..) , PeerScore (..) @@ -68,6 +69,7 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.Types , emptyPeerScore , emptyPeerTxLocalState , emptySharedTxState + , diffTimeToMillis ) where import Control.DeepSeq (NFData) @@ -187,6 +189,14 @@ data TxEntry peeraddr = TxEntry { deriving stock (Eq, Show, Generic) deriving anyclass (NFData, NoThunks) +-- | Whether a txid request will be sent as a blocking or pipelined wire +-- message. +data TxIdsReqFlavour + = TxIdsBlockingReq + | TxIdsPipelinedReq + deriving stock (Eq, Show, Generic) + deriving anyclass (NFData, NoThunks) + -- | The next peer-local action chosen by the V2 worker thread. -- -- V2 drives progress from the peer thread itself. Shared state only decides @@ -196,9 +206,9 @@ data PeerAction = -- | No immediate work is available. The peer should wait for either a -- shared-state generation change or the optional timeout. PeerDoNothing !Word64 !(Maybe DiffTime) - | -- | Send a txid protocol message acknowledging the first argument and - -- requesting the second argument. 
- PeerRequestTxIds !NumTxIdsToAck !NumTxIdsToReq + | -- | Send a txid protocol message acknowledging the second argument and + -- requesting the third argument. + PeerRequestTxIds !TxIdsReqFlavour !NumTxIdsToAck !NumTxIdsToReq | -- | Request tx bodies for the given internally keyed txs from this peer. PeerRequestTxs ![TxKey] | -- | Submit the buffered txs identified by the given keys to the local @@ -259,16 +269,52 @@ data PeerScore = PeerScore { -- They are kept separate from 'SharedTxState' so that emission can read a -- small dedicated counter cell without scanning protocol state. data TxSubmissionCounters = TxSubmissionCounters { - txIdMessagesSent :: !Word64, - txIdsRequested :: !Word64, - txIdRepliesReceived :: !Word64, - txIdsReceived :: !Word64, - txMessagesSent :: !Word64, - txsRequested :: !Word64, - txRepliesReceived :: !Word64, - txsReceived :: !Word64, - txsOmitted :: !Word64, - lateBodies :: !Word64 + txIdMessagesSent :: !Word64, + -- ^ Number of txid request messages sent (@MsgRequestTxIds@, blocking or + -- pipelined). + txIdsRequested :: !Word64, + -- ^ Total number of txids requested across all txid request messages (sum + -- of @NumTxIdsToReq@ values). + txIdRepliesReceived :: !Word64, + -- ^ Number of txid reply messages received (@MsgReplyTxIds@). + txIdsReceived :: !Word64, + -- ^ Total number of txid-size pairs received across all txid replies. + txMessagesSent :: !Word64, + -- ^ Number of tx body request messages sent (@MsgRequestTxs@). + txsRequested :: !Word64, + -- ^ Total number of tx bodies requested across all body request messages. + txRepliesReceived :: !Word64, + -- ^ Number of tx body reply messages received (@MsgReplyTxs@). + txsReceived :: !Word64, + -- ^ Total number of tx bodies received across all body replies. + txsOmitted :: !Word64, + -- ^ Number of requested tx bodies the peer omitted from its reply. 
+ lateBodies :: !Word64, + -- ^ Number of tx bodies received after the local state had already + -- resolved them (txid was found in the mempool before the body arrived). + txsAccepted :: !Word64, + -- ^ Tx bodies resolved into the mempool (includes txs found already present + -- before attempting submission). + txsRejected :: !Word64, + -- ^ Tx bodies rejected by the mempool. + txIdBlockingReqsSent :: !Word64, + -- ^ Txid request messages sent as blocking requests. + txIdPipelinedReqsSent :: !Word64, + -- ^ Txid request messages sent as pipelined (non-blocking) requests. + txIdBlockingWaitMs :: !Word64, + -- ^ Cumulative milliseconds spent waiting for replies to blocking txid + -- requests. High values indicate the system is mostly idle (no new + -- transactions available from peers). + txPipelineWaitMs :: !Word64, + -- ^ Cumulative milliseconds the pipeline was active, measured from the + -- first 'MsgRequestTxs' send until all pipelined requests (both body and + -- txid) have been replied to and the pipeline fully drains. Proxy for + -- the "loading" state where the peer is actively downloading transactions. + txSubmissionWaitMs :: !Word64 + -- ^ Cumulative milliseconds spent inside 'mempoolAddTxs'. Covers both + -- normal submission latency and time blocked due to a full mempool. + -- High values relative to the other duration fields indicate mempool + -- backpressure. 
} deriving stock (Eq, Show, Generic) deriving anyclass NFData @@ -284,7 +330,14 @@ instance Semigroup TxSubmissionCounters where txRepliesReceived = txRepliesReceived a + txRepliesReceived b, txsReceived = txsReceived a + txsReceived b, txsOmitted = txsOmitted a + txsOmitted b, - lateBodies = lateBodies a + lateBodies b + lateBodies = lateBodies a + lateBodies b, + txsAccepted = txsAccepted a + txsAccepted b, + txsRejected = txsRejected a + txsRejected b, + txIdBlockingReqsSent = txIdBlockingReqsSent a + txIdBlockingReqsSent b, + txIdPipelinedReqsSent = txIdPipelinedReqsSent a + txIdPipelinedReqsSent b, + txIdBlockingWaitMs = txIdBlockingWaitMs a + txIdBlockingWaitMs b, + txPipelineWaitMs = txPipelineWaitMs a + txPipelineWaitMs b, + txSubmissionWaitMs = txSubmissionWaitMs a + txSubmissionWaitMs b } instance Monoid TxSubmissionCounters where @@ -298,9 +351,20 @@ instance Monoid TxSubmissionCounters where txRepliesReceived = 0, txsReceived = 0, txsOmitted = 0, - lateBodies = 0 + lateBodies = 0, + txsAccepted = 0, + txsRejected = 0, + txIdBlockingReqsSent = 0, + txIdPipelinedReqsSent = 0, + txIdBlockingWaitMs = 0, + txPipelineWaitMs = 0, + txSubmissionWaitMs = 0 } +-- | Convert a 'DiffTime' to whole milliseconds, rounding to nearest. +diffTimeToMillis :: DiffTime -> Word64 +diffTimeToMillis dt = round (realToFrac dt * 1000 :: Double) + emptyPeerScore :: Time -> PeerScore emptyPeerScore scoreTs = PeerScore { peerScoreValue = 0, @@ -329,7 +393,11 @@ data PeerTxLocalState tx = PeerTxLocalState { -- | Tx bodies downloaded from this peer and buffered locally until they -- are either submitted to the mempool or superseded by shared-state -- resolution. - peerDownloadedTxs :: !(IntMap tx) + peerDownloadedTxs :: !(IntMap tx), + + -- | Time at which the first outstanding body-request batch was + -- sent in the current download episode. 
+ peerDownloadStartTime :: !(Maybe Time) } deriving stock (Eq, Show, Generic) deriving anyclass (NFData, NoThunks) @@ -342,7 +410,8 @@ emptyPeerTxLocalState = PeerTxLocalState { peerRequestedTxBatches = StrictSeq.empty, peerRequestedTxsSize = 0, peerRequestedTxIds = 0, - peerDownloadedTxs = IntMap.empty + peerDownloadedTxs = IntMap.empty, + peerDownloadStartTime = Nothing } -- | Small shared view of peer state used for lease claiming and peer diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 4e0b15376e6..b4836d59d63 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -1155,7 +1155,7 @@ unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx :: (String -> IO ()) - unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx step = do step "Run nextPeerAction with an ackable retained tx followed by a blocked buffered tx" case peerAction of - PeerRequestTxIds txIdsToAcknowledge txIdsToReq -> do + PeerRequestTxIds _ txIdsToAcknowledge txIdsToReq -> do step "Assert only the safe prefix is acknowledged" txIdsToAcknowledge @?= 1 assertBool ("expected positive txIdsToReq, got " ++ show txIdsToReq) (txIdsToReq > 0) @@ -1223,14 +1223,14 @@ prop_nextPeerAction_nonOwnerWaitsUntilResolved (Positive owner0) (Positive peera [ case unresolvedAction of PeerDoNothing _ _ -> unresolvedExpectations - PeerRequestTxIds txIdsToAcknowledge _ -> + PeerRequestTxIds _ txIdsToAcknowledge _ -> conjoin [ txIdsToAcknowledge === 0 , unresolvedExpectations ] _ -> counterexample ("unexpected unresolved action: " ++ show unresolvedAction) False , case resolvedAction of - PeerRequestTxIds txIdsToAcknowledge _ -> + PeerRequestTxIds _ txIdsToAcknowledge _ -> conjoin [ txIdsToAcknowledge === 1 , peerUnacknowledgedTxIds resolvedPeerState' === StrictSeq.empty @@ -1322,7 
+1322,7 @@ prop_nextPeerActionPipelined_requestsTxIds -> Property prop_nextPeerActionPipelined_requestsTxIds (Positive peeraddr) txid0 _txSize0 = case peerAction of - PeerRequestTxIds txIdsToAcknowledge txIdsToReq -> + PeerRequestTxIds _ txIdsToAcknowledge txIdsToReq -> conjoin [ txIdsToAcknowledge === 1 , counterexample ("expected positive txIdsToReq, got " ++ show txIdsToReq) (txIdsToReq > 0) @@ -1358,7 +1358,7 @@ unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply step = do step "Run nextPeerActionPipelined with three ackable txids and one outstanding body batch" case peerAction of - PeerRequestTxIds txIdsToAcknowledge txIdsToReq -> do + PeerRequestTxIds _ txIdsToAcknowledge txIdsToReq -> do step "Assert pipelined txid requests keep one txid unacked while a body reply is still in flight" txIdsToAcknowledge @?= 2 assertBool ("expected positive txIdsToReq, got " ++ show txIdsToReq) (txIdsToReq > 0) @@ -1913,7 +1913,8 @@ genPeerTxLocalState = sized $ \n -> do peerRequestedTxBatches, peerRequestedTxsSize, peerRequestedTxIds, - peerDownloadedTxs + peerDownloadedTxs, + peerDownloadStartTime = Nothing } where genAvailableTx key = do diff --git a/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs b/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs index 86dfbeee090..7fbdf4d2ad4 100644 --- a/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs +++ b/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs @@ -107,6 +107,13 @@ instance LogFormatting TxSubmissionCounters where , "txsReceived" .= txsReceived , "txsOmitted" .= txsOmitted , "lateBodies" .= lateBodies + , "txsAccepted" .= txsAccepted + , "txsRejected" .= txsRejected + , "txIdBlockingReqsSent" .= txIdBlockingReqsSent + , "txIdPipelinedReqsSent" .= txIdPipelinedReqsSent + , "txIdBlockingWaitMs" .= txIdBlockingWaitMs + , "txPipelineWaitMs" .= txPipelineWaitMs + , 
"txSubmissionWaitMs" .= txSubmissionWaitMs ] asMetrics TxSubmissionCounters {..} = @@ -120,6 +127,13 @@ instance LogFormatting TxSubmissionCounters where , IntM "txSubmission.txsReceived" (fromIntegral txsReceived) , IntM "txSubmission.txsOmitted" (fromIntegral txsOmitted) , IntM "txSubmission.lateBodies" (fromIntegral lateBodies) + , IntM "txSubmission.txsAccepted" (fromIntegral txsAccepted) + , IntM "txSubmission.txsRejected" (fromIntegral txsRejected) + , IntM "txSubmission.txIdBlockingReqsSent" (fromIntegral txIdBlockingReqsSent) + , IntM "txSubmission.txIdPipelinedReqsSent" (fromIntegral txIdPipelinedReqsSent) + , IntM "txSubmission.txIdBlockingWaitMs" (fromIntegral txIdBlockingWaitMs) + , IntM "txSubmission.txPipelineWaitMs" (fromIntegral txPipelineWaitMs) + , IntM "txSubmission.txSubmissionWaitMs" (fromIntegral txSubmissionWaitMs) ] instance MetaTrace TxSubmissionCounters where @@ -140,6 +154,13 @@ instance MetaTrace TxSubmissionCounters where , ("txSubmission.txsReceived", "number of tx bodies received") , ("txSubmission.txsOmitted", "number of requested tx bodies omitted from replies") , ("txSubmission.lateBodies", "number of tx bodies received after local resolution") + , ("txSubmission.txsAccepted", "number of tx bodies resolved into the mempool") + , ("txSubmission.txsRejected", "number of tx bodies rejected by the mempool") + , ("txSubmission.txIdBlockingReqsSent", "number of blocking txid request messages sent") + , ("txSubmission.txIdPipelinedReqsSent", "number of pipelined txid request messages sent") + , ("txSubmission.txIdBlockingWaitMs", "cumulative milliseconds spent waiting for blocking txid replies (idle state proxy)") + , ("txSubmission.txPipelineWaitMs", "cumulative milliseconds the pipeline was active from first body request until full drain (loading state proxy)") + , ("txSubmission.txSubmissionWaitMs", "cumulative milliseconds spent in mempoolAddTxs; high values indicate mempool backpressure") ] metricsDocFor _ = [] From 
8d6c9991b52e58e539116db254ab7f4022056327 Mon Sep 17 00:00:00 2001
From: Karl Knutsson
Date: Fri, 17 Apr 2026 08:50:24 +0200
Subject: [PATCH 29/67] fixup: retainedExpiredKeys quick exit

The common case is that no TXs have expired. If so, exit quickly.
---
 .../Network/TxSubmission/Inbound/V2/Types.hs      | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs
index b6521ae4124..591911fc161 100644
--- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs
+++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs
@@ -534,14 +534,17 @@ retainedNextWake currentTime =
 {-# INLINE retainedNextWake #-}

 retainedExpiredKeys :: Time -> RetainedTxs -> IntSet
-retainedExpiredKeys currentTime =
-    go IntSet.empty
+retainedExpiredKeys currentTime retained =
+    -- Quick exit if no TX has expired.
+    case IntPSQ.findMin retained of
+      Just (_, earliest, _) | earliest <= currentTime -> go IntSet.empty retained
+      _                                               -> IntSet.empty
   where
-    go expired retained =
-      case IntPSQ.minView retained of
-        Just (k, retainUntil, (), retained')
+    go expired r =
+      case IntPSQ.minView r of
+        Just (k, retainUntil, (), r')
           | retainUntil <= currentTime ->
-              go (IntSet.insert k expired) retained'
+              go (IntSet.insert k expired) r'
           | otherwise ->
               expired
         Nothing ->

From f4decc91947b870f14b2f494ab79650ed8d17bd6 Mon Sep 17 00:00:00 2001
From: Karl Knutsson
Date: Fri, 17 Apr 2026 08:51:40 +0200
Subject: [PATCH 30/67] fixup: avoid IntMap.intersection for common case

The common case is that no TX has been downloaded.
---
 .../Ouroboros/Network/TxSubmission/Inbound/V2/State.hs | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs
index a38230c2e1c..e3ad48a2d82 100644
--- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs
+++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs
@@ -92,9 +92,12 @@ mkPeerActionContext now policy peeraddr peerState sharedState =
     -- Remove downloaded tx bodies that are no longer in the shared state.
     peerState' =
-      peerState {
-        peerDownloadedTxs = IntMap.restrictKeys (peerDownloadedTxs peerState) (IntMap.keysSet (sharedTxTable sharedState'))
-      }
+      let downloaded = peerDownloadedTxs peerState
+       in if IntMap.null downloaded
+            then peerState
+            else peerState {
+                   peerDownloadedTxs = IntMap.intersection downloaded (sharedTxTable sharedState')
+                 }

     sharedPeerState' =
       case Map.lookup peeraddr (sharedPeers sharedState') of

From f9739f8e0b813a2054199602835d2de141457277 Mon Sep 17 00:00:00 2001
From: Karl Knutsson
Date: Fri, 17 Apr 2026 10:43:24 +0200
Subject: [PATCH 31/67] fixup: move peer score to local state

Since the peer score is now purely peer-local, move it into
PeerTxLocalState and update it outside of the atomically block.
--- .../Diffusion/Testnet/MiniProtocols.hs | 1 + .../bench/Bench/TxSubmissionV2Server.hs | 1 + .../Network/TxSubmission/Inbound/V2.hs | 24 +-- .../TxSubmission/Inbound/V2/Registry.hs | 93 ++--------- .../Network/TxSubmission/Inbound/V2/State.hs | 38 ++++- .../Network/TxSubmission/Inbound/V2/Types.hs | 12 +- .../Ouroboros/Network/TxSubmission/AppV2.hs | 1 + .../Ouroboros/Network/TxSubmission/TxLogic.hs | 155 ++++++++---------- 8 files changed, 146 insertions(+), 179 deletions(-) diff --git a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs index 6326d6daf17..b5f3dd93192 100644 --- a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs +++ b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs @@ -729,6 +729,7 @@ applications debugTracer txSubmissionInboundTracer _txSubmissionInboundDebug nod let server = txSubmissionInboundV2 txSubmissionInboundTracer NoTxSubmissionInitDelay + aaTxDecisionPolicy (getMempoolReader mempool) (getMempoolWriter duplicateTxVar mempool) getTxSize diff --git a/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs b/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs index d46dcaede58..038e7540bd4 100644 --- a/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs +++ b/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs @@ -95,6 +95,7 @@ runDirectServerBenchmark txSubmissionInboundV2 nullTracer NoTxSubmissionInitDelay + defaultTxDecisionPolicy (getMempoolReader inboundMempool) (getMempoolWriter duplicateTxIdsVar inboundMempool) getTxSize diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs index 95a7170e825..5733118bb10 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs +++ 
b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs @@ -16,7 +16,6 @@ module Ouroboros.Network.TxSubmission.Inbound.V2 , TxSubmissionInitDelay (..) ) where -import Data.Functor (void) import Data.List.NonEmpty qualified as NonEmpty import Data.Map.Strict qualified as Map import Data.Sequence.Strict qualified as StrictSeq @@ -37,6 +36,7 @@ import Ouroboros.Network.Protocol.TxSubmission2.Type (NumTxIdsToAck, SizeInBytes) import Ouroboros.Network.TxSubmission.Inbound.V2.Policy import Ouroboros.Network.TxSubmission.Inbound.V2.Registry as V2 +import Ouroboros.Network.TxSubmission.Inbound.V2.State qualified as State import Ouroboros.Network.TxSubmission.Inbound.V2.Types as V2 import Ouroboros.Network.TxSubmission.Mempool.Reader @@ -88,6 +88,7 @@ txSubmissionInboundV2 ) => Tracer m (TraceTxSubmissionInbound txid tx) -> TxSubmissionInitDelay + -> TxDecisionPolicy -> TxSubmissionMempoolReader txid tx idx m -> TxSubmissionMempoolWriter txid tx idx m err -> (tx -> SizeInBytes) @@ -96,6 +97,7 @@ txSubmissionInboundV2 txSubmissionInboundV2 tracer initDelay + policy TxSubmissionMempoolReader { mempoolGetSnapshot } TxSubmissionMempoolWriter { txId, mempoolAddTxs } txSize @@ -106,7 +108,6 @@ txSubmissionInboundV2 applyReceivedTxIds, applyReceivedTxs, applySubmittedTxs, - countRejectedTxs, resolveTxRequest, resolveBufferedTxs, startSubmittingTxs, @@ -136,7 +137,7 @@ txSubmissionInboundV2 addCounters mempty { txPipelineWaitMs = diffTimeToMillis (now `diffTime` startTime) } pure $ peerState { peerDownloadStartTime = Nothing } - (peerAction, peerState'') <- runNextPeerAction now peerState' + (peerAction, peerState'') <- runNextPeerAction now (State.drainPeerScore policy now peerState') case peerAction of PeerDoNothing generation mDelay -> do awaitSharedChange generation mDelay @@ -183,7 +184,8 @@ txSubmissionInboundV2 delta = end `diffTime` start addCounters mempty { txSubmissionWaitMs = diffTimeToMillis delta } - score <- countRejectedTxs end rejectedCount + 
peerState' <- applySubmittedTxs end resolvedTxKeys (fmap fst rejectedTxs) peerState + let (score, peerState'') = State.applyPeerRejections policy end rejectedCount peerState' traceWith tracer $ TraceTxSubmissionProcessed ProcessedTxCount { ptxcAccepted = length acceptedTxs, @@ -194,9 +196,7 @@ txSubmissionInboundV2 traceWith tracer (TraceTxInboundAddedToMempool (snd <$> acceptedTxs) delta) unless (null rejectedForTrace) $ traceWith tracer (TraceTxInboundRejectedFromMempool rejectedForTrace delta) - - peerState' <- applySubmittedTxs end resolvedTxKeys (fmap fst rejectedTxs) peerState - continueWithStateM k peerState' + continueWithStateM k peerState'' -- Request transaction bodies from the peer. requestTxBodies :: forall (n :: N). @@ -224,7 +224,7 @@ txSubmissionInboundV2 continueAfterReplies Zero = serverIdle continueAfterReplies n@Succ{} = StatefulM $ \peerState -> do now <- getMonotonicTime - (peerAction, peerState') <- runNextPeerActionPipelined now peerState + (peerAction, peerState') <- runNextPeerActionPipelined now (State.drainPeerScore policy now peerState) case peerAction of PeerSubmitTxs txKeys -> continueWithStateM (submitBufferedTxs txKeys (continueAfterReplies n)) peerState' @@ -241,7 +241,7 @@ txSubmissionInboundV2 -> StatefulM (PeerTxLocalState tx) (S n) txid tx m continueAfterBodyRequests n = StatefulM $ \peerState -> do now <- getMonotonicTime - (peerAction, peerState') <- runNextPeerActionPipelined now peerState + (peerAction, peerState') <- runNextPeerActionPipelined now (State.drainPeerScore policy now peerState) case peerAction of PeerSubmitTxs txKeys -> continueWithStateM (submitBufferedTxs txKeys (continueAfterReplies n)) peerState' @@ -331,9 +331,9 @@ txSubmissionInboundV2 throwIO protocolError now <- getMonotonicTime (penaltyCount, peerState') <- applyReceivedTxs now [ (txId tx, tx) | tx <- txs ] peerState - unless (penaltyCount == 0) $ - void $ countRejectedTxs now penaltyCount - continueWithStateM (continueAfterReplies n) peerState' + let 
peerState'' | penaltyCount == 0 = peerState' + | otherwise = snd (State.applyPeerRejections policy now penaltyCount peerState') + continueWithStateM (continueAfterReplies n) peerState'' -- Partition submitted transactions into accepted and rejected groups classifySubmittedTxs :: [(TxKey, txid)] diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index 79664f0d22f..6cdee8862ac 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -111,12 +111,6 @@ data PeerTxAPI m txid tx = PeerTxAPI { -> PeerTxLocalState tx -> m (PeerTxLocalState tx), - -- | Update the peer's rejection score based on the number of txs rejected - -- by the mempool, or late/missing delivieries. - countRejectedTxs :: Time - -> Int - -> m Double, - -- | Resolve txids and advertised sizes for a batch of tx keys to request. 
resolveTxRequest :: PeerTxLocalState tx -> [TxKey] @@ -155,8 +149,7 @@ withPeer withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar countersVar peeraddr io = bracket (do - now <- getMonotonicTime - atomically $ modifyTVar sharedStateVar (registerPeer now) + atomically $ modifyTVar sharedStateVar registerPeer pure PeerTxAPI { awaitSharedChange = awaitSharedChangeImp sharedStateVar peeraddr , runNextPeerAction = runNextPeerActionImp policy sharedStateVar countersVar peeraddr @@ -167,7 +160,6 @@ withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar , applyReceivedTxs = applyReceivedTxsImp policy mempoolGetSnapshot sharedStateVar countersVar peeraddr , applySubmittedTxs = applySubmittedTxsImp policy sharedStateVar countersVar peeraddr - , countRejectedTxs = countRejectedTxsImp policy sharedStateVar peeraddr , resolveTxRequest = resolveTxRequestImp sharedStateVar , resolveBufferedTxs = resolveBufferedTxsImp sharedStateVar , startSubmittingTxs = atomically . modifyTVar sharedStateVar . 
@@ -180,8 +172,8 @@ withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar atomically $ modifyTVar sharedStateVar (unregisterPeer now)) io where - registerPeer :: Time -> SharedTxState peeraddr txid -> SharedTxState peeraddr txid - registerPeer now st@SharedTxState { sharedPeers, sharedGeneration } = + registerPeer :: SharedTxState peeraddr txid -> SharedTxState peeraddr txid + registerPeer st@SharedTxState { sharedPeers, sharedGeneration } = st { sharedPeers = Map.insert peeraddr sharedPeerState sharedPeers, sharedGeneration = sharedGeneration + 1 @@ -189,7 +181,6 @@ withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar where sharedPeerState = SharedPeerState { sharedPeerPhase = PeerIdle, - sharedPeerScore = emptyPeerScore now, sharedPeerAdvertisedTxKeys = IntSet.empty, sharedPeerGeneration = 0 } @@ -337,7 +328,7 @@ runNextPeerActionImp policy sharedStateVar countersVar peeraddr now peerState = let sharedGeneration0 = sharedGeneration sharedState (peerAction, peerState', sharedState') = State.nextPeerAction now policy peeraddr peerState sharedState - sharedState'' = updatePeerPhase now policy peeraddr + sharedState'' = updatePeerPhase peeraddr (peerPhaseForActionIdle peerAction) sharedState' writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState'' updateCountersForAction countersVar peerAction @@ -364,7 +355,7 @@ runNextPeerActionPipelinedImp policy sharedStateVar countersVar peeraddr now pee let sharedGeneration0 = sharedGeneration sharedState (peerAction, peerState', sharedState') = State.nextPeerActionPipelined now policy peeraddr peerState sharedState - sharedState'' = updatePeerPhase now policy peeraddr + sharedState'' = updatePeerPhase peeraddr (peerPhaseForActionPipelined peeraddr peerAction sharedState') sharedState' writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState'' @@ -466,42 +457,6 @@ applySubmittedTxsImp policy sharedStateVar countersVar peeraddr now acceptedTxs , 
txsRejected = fromIntegral (length rejectedTxs) }) return peerState' --- | Update the peer's rejection score based on the number of txs rejected --- by the mempool. --- Returns the new score value for tracing. The score --- decays over time and affects fallback peer selection when leases expire. -countRejectedTxsImp :: ( MonadSTM m - , Ord peeraddr) - => TxDecisionPolicy - -> SharedTxStateVar m peeraddr txid - -> peeraddr - -> Time - -> Int - -> m Double -countRejectedTxsImp TxDecisionPolicy { scoreRate, scoreMax } sharedStateVar peeraddr now - rejectedCount = atomically $ stateTVar sharedStateVar $ - updatePeerRejects (fromIntegral rejectedCount) - where - updatePeerRejects n sharedState = - case Map.lookup peeraddr (sharedPeers sharedState) of - Nothing -> (0, sharedState) -- TODO this is an invariant violation - Just sharedPeerState@SharedPeerState { sharedPeerScore } -> - let sharedPeerScore' = updateRejects n sharedPeerScore - sharedPeerState' = sharedPeerState { sharedPeerScore = sharedPeerScore' } - sharedState' = sharedState { - sharedPeers = Map.insert peeraddr sharedPeerState' (sharedPeers sharedState), - sharedGeneration = sharedGeneration sharedState + 1 - } in - (peerScoreValue sharedPeerScore', sharedState') - - updateRejects 0 ps@PeerScore { peerScoreValue = 0 } = ps { peerScoreTs = now } - updateRejects n ps@PeerScore { peerScoreValue, peerScoreTs } = - let duration = diffTime now peerScoreTs - !drain = realToFrac duration * scoreRate - !drained = max 0 (peerScoreValue - drain) in - ps { peerScoreValue = min scoreMax (drained + n) - , peerScoreTs = now } - -- | Resolve txids and advertised sizes for a batch of tx keys to request. -- -- Looks up the real txid and size from peer-local state for building the @@ -548,55 +503,35 @@ resolveBufferedTxsImp sharedStateVar peerState txKeys = atomically $ do -- | Update a peer's phase. -- --- A phase change always bumps the shared generation and normalizes the moving --- peer's score by draining it to @now@. 
In addition: +-- A phase change always bumps the shared generation. In addition: -- -- * When a peer becomes 'PeerIdle', bump that peer's own generation so a -- 'PeerDoNothing' action computed before the phase change does not put that -- same peer thread to sleep on a stale generation. This makes its next -- 'awaitSharedChange' return immediately and re-run scheduling as an idle -- claimant. --- * When a peer becomes 'PeerIdle', bump that peer's own generation so it --- immediately re-runs scheduling against any txs whose score-derived claim --- delay may already have elapsed. +-- * When a peer leaves idle, bump idle advertisers so they can immediately +-- compete for any leases the departing peer held. updatePeerPhase :: Ord peeraddr - => Time - -> TxDecisionPolicy - -> peeraddr + => peeraddr -> PeerPhase -> SharedTxState peeraddr txid -> SharedTxState peeraddr txid -updatePeerPhase now policy peeraddr peerPhaseNew +updatePeerPhase peeraddr peerPhaseNew st@SharedTxState { sharedPeers, sharedGeneration } = case Map.lookup peeraddr sharedPeers of Just sharedPeerState -> let peerPhaseOld = sharedPeerPhase sharedPeerState in if peerPhaseOld /= peerPhaseNew then - let sharedPeerScore' = - normalizePeerScore (sharedPeerScore sharedPeerState) - sharedPeerState' = - sharedPeerState { - sharedPeerPhase = peerPhaseNew, - sharedPeerScore = sharedPeerScore' - } - in - let st' = st { sharedPeers = Map.insert peeraddr - sharedPeerState' sharedPeers - , sharedGeneration = sharedGeneration + 1 } in - bumpIdlePeerGenerations (phaseWakePeers peerPhaseOld) st' + let sharedPeerState' = sharedPeerState { sharedPeerPhase = peerPhaseNew } + st' = st { sharedPeers = Map.insert peeraddr sharedPeerState' sharedPeers + , sharedGeneration = sharedGeneration + 1 } + in bumpIdlePeerGenerations (phaseWakePeers peerPhaseOld) st' else st _ -> st -- TODO error? 
where - normalizePeerScore ps@PeerScore { peerScoreValue } - | peerScoreValue == 0 = ps - | otherwise = - let PeerScore { peerScoreTs } = ps - !drain = realToFrac (diffTime now peerScoreTs) * scoreRate policy - !drained = max 0 (peerScoreValue - drain) - in ps { peerScoreValue = drained, peerScoreTs = now } - phaseWakePeers peerPhaseOld | peerPhaseOld /= PeerIdle , peerPhaseNew == PeerIdle = Set.singleton peeraddr diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index e3ad48a2d82..bd6868bc3de 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -12,6 +12,8 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.State , advertisingPeersForTxKeysExcept , advertisingPeersForTxExcept , removeAdvertisingPeersForResolvedTx + , drainPeerScore + , applyPeerRejections ) where import Control.Monad.Class.MonadTime.SI (DiffTime, Time, addTime, diffTime) @@ -74,7 +76,7 @@ mkPeerActionContext now policy peeraddr peerState sharedState = pacPeerState = peerState', pacSharedState = sharedState', pacSharedPeerState = sharedPeerState', - pacClaimDelay = peerClaimDelay policy now (sharedPeerScore sharedPeerState') + pacClaimDelay = peerClaimDelay policy now (peerScore peerState') } where -- Remove expired retained TX keys from all shared state tables. @@ -544,6 +546,40 @@ peerClaimDelay policy currentTime peerScore -- Delay contribution in milliseconds is peerScore / 20, then converted to seconds. realToFrac . (/ 20000) $ currentPeerScore policy currentTime peerScore +-- | Decay the peer's score to @now@, updating the timestamp. 
+drainPeerScore :: TxDecisionPolicy + -> Time + -> PeerTxLocalState tx + -> PeerTxLocalState tx +drainPeerScore policy now peerState@PeerTxLocalState { peerScore } + | peerScoreValue peerScore == 0 = + peerState { peerScore = peerScore { peerScoreTs = now } } + | otherwise = + let drained = currentPeerScore policy now peerScore in + peerState { peerScore = PeerScore { peerScoreValue = drained, peerScoreTs = now } } +{-# INLINE drainPeerScore #-} + +-- | Apply a rejection penalty to the peer's local score. +-- Returns the new score value (for tracing) and the updated local state. +applyPeerRejections :: TxDecisionPolicy + -> Time + -> Int + -> PeerTxLocalState tx + -> (Double, PeerTxLocalState tx) +applyPeerRejections TxDecisionPolicy { scoreRate, scoreMax } now rejectedCount + peerState@PeerTxLocalState { peerScore } = + (peerScoreValue peerScore', peerState { peerScore = peerScore' }) + where + n = fromIntegral rejectedCount :: Double + peerScore' = applyRejects n peerScore + applyRejects 0 ps@PeerScore { peerScoreValue = 0 } = ps { peerScoreTs = now } + applyRejects n' ps@PeerScore { peerScoreValue, peerScoreTs } = + let duration = diffTime now peerScoreTs + !drain = realToFrac duration * scoreRate + !drained = max 0 (peerScoreValue - drain) in + ps { peerScoreValue = min scoreMax (drained + n'), peerScoreTs = now } +{-# INLINE applyPeerRejections #-} + txClaimReadyAt :: DiffTime -> TxEntry peeraddr -> Time txClaimReadyAt claimDelay TxEntry { txLease } = addTime claimDelay claimableAt diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index 591911fc161..20a9878454e 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -375,7 +375,7 @@ emptyPeerScore scoreTs = PeerScore { -- -- These are the pieces of state that naturally belong to the worker 
-- thread handling one peer. Shared arbitration state such as peer --- phase and peer score is kept separately in 'SharedPeerState'. +-- phase is kept separately in 'SharedPeerState'. data PeerTxLocalState tx = PeerTxLocalState { -- | Unacknowledged txids in the order advertised by the peer. peerUnacknowledgedTxIds :: !(StrictSeq TxKey), @@ -397,7 +397,11 @@ data PeerTxLocalState tx = PeerTxLocalState { -- | Time at which the first outstanding body-request batch was -- sent in the current download episode. - peerDownloadStartTime :: !(Maybe Time) + peerDownloadStartTime :: !(Maybe Time), + + -- | Usefulness score for this peer, tracking rejection penalties and + -- time-based decay. + peerScore :: !PeerScore } deriving stock (Eq, Show, Generic) deriving anyclass (NFData, NoThunks) @@ -411,14 +415,14 @@ emptyPeerTxLocalState = PeerTxLocalState { peerRequestedTxsSize = 0, peerRequestedTxIds = 0, peerDownloadedTxs = IntMap.empty, - peerDownloadStartTime = Nothing + peerDownloadStartTime = Nothing, + peerScore = emptyPeerScore (Time 0) } -- | Small shared view of peer state used for lease claiming and peer -- selection. 
data SharedPeerState = SharedPeerState { sharedPeerPhase :: !PeerPhase, - sharedPeerScore :: !PeerScore, sharedPeerAdvertisedTxKeys :: !IntSet, sharedPeerGeneration :: !Word64 } diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs index 198ec8cee7b..b64a28c203e 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs @@ -209,6 +209,7 @@ runTxSubmission tracer _tracerTxLogic st0 txDecisionPolicy = do let server = txSubmissionInboundV2 sayTracer NoTxSubmissionInitDelay + txDecisionPolicy (getMempoolReader inboundMempool) (getMempoolWriter duplicateTxIdsVar inboundMempool) getTxSize diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index b4836d59d63..4a456dfd4dd 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -290,7 +290,7 @@ instance Arbitrary ArbSharedPeerState where { sharedPeerGeneration = 0 } ] where - defaultPeerState = mkSharedPeerState PeerIdle (emptyPeerScore now) + defaultPeerState = mkSharedPeerState PeerIdle instance Arbitrary ArbPeerTxLocalState where arbitrary = ArbPeerTxLocalState <$> genPeerTxLocalState @@ -530,8 +530,8 @@ unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull step = do k = unTxKey key sharedState0 = emptySharedTxState { sharedPeers = Map.fromList - [ (peerA, mkSharedPeerState PeerIdle (emptyPeerScore now)) - , (peerB, mkSharedPeerState PeerIdle (emptyPeerScore now)) + [ (peerA, mkSharedPeerState PeerIdle) + , (peerB, mkSharedPeerState PeerIdle) ] } peerAState0 = emptyPeerTxLocalState @@ -790,7 +790,7 @@ prop_nextPeerAction_prioritisesSubmit (Positive peeraddr) txid0 txSize0 = 
sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr - (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) + (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxLeased peeraddr (addTime 10 now) , txAdvertiserCount = 1 @@ -831,6 +831,7 @@ prop_nextPeerAction_claimsClaimableTx (Positive peerA0) (Positive peerB0) (Posit _ -> counterexample ("unexpected peer action: " ++ show peerAction) False where peerA = peerA0 + peerAScore = PeerScore 1 now peerB = peerB0 + 1000 peerC = peerC0 + 2000 distinctPeers = peerA /= peerB && peerA /= peerC && peerB /= peerC @@ -840,9 +841,9 @@ prop_nextPeerAction_claimsClaimableTx (Positive peerA0) (Positive peerB0) (Posit k = unTxKey key sharedState0 = emptySharedTxState { sharedPeers = Map.fromList - [ (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (PeerScore 1 now))) - , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (PeerScore 10 now))) - , (peerC, withAdvertisedTxKeys [key] (mkSharedPeerState PeerWaitingTxs (PeerScore 0 now))) + [ (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + , (peerC, withAdvertisedTxKeys [key] (mkSharedPeerState PeerWaitingTxs)) ] , sharedTxIdToKey = Map.singleton txid key , sharedKeyToTxId = IntMap.singleton k txid @@ -853,7 +854,8 @@ prop_nextPeerAction_claimsClaimableTx (Positive peerA0) (Positive peerB0) (Posit , txAttempts = Map.empty } } - peerState0 = emptyPeerTxLocalState { peerAvailableTxIds = IntMap.singleton k txSize } + peerState0 = emptyPeerTxLocalState { peerAvailableTxIds = IntMap.singleton k txSize + , peerScore = peerAScore } (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peerA peerState0 sharedState0 unit_nextPeerAction_claimsAtScoreDelayThreshold :: (String -> IO ()) -> Assertion @@ -878,7 +880,7 @@ 
unit_nextPeerAction_claimsAtScoreDelayThreshold step = do sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr - (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (PeerScore 20 now))) + (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) , sharedTxIdToKey = Map.singleton txid key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 @@ -888,7 +890,8 @@ unit_nextPeerAction_claimsAtScoreDelayThreshold step = do , txAttempts = Map.empty } } - peerState0 = emptyPeerTxLocalState { peerAvailableTxIds = IntMap.singleton k txSize } + peerState0 = emptyPeerTxLocalState { peerAvailableTxIds = IntMap.singleton k txSize + , peerScore = PeerScore 20 now } (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 @@ -917,6 +920,7 @@ prop_nextPeerAction_claimsExpiredLease (Positive oldOwner0) (Positive peerA0) (P where oldOwner = oldOwner0 peerA = peerA0 + 1000 + peerAScore = PeerScore 1 now peerB = peerB0 + 2000 distinctPeers = oldOwner /= peerA && oldOwner /= peerB && peerA /= peerB txid = abs txid0 + 1 @@ -925,9 +929,9 @@ prop_nextPeerAction_claimsExpiredLease (Positive oldOwner0) (Positive peerA0) (P k = unTxKey key sharedState0 = emptySharedTxState { sharedPeers = Map.fromList - [ (oldOwner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerWaitingTxs (PeerScore 0 now))) - , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (PeerScore 1 now))) - , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (PeerScore 10 now))) + [ (oldOwner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerWaitingTxs)) + , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) ] , sharedTxIdToKey = Map.singleton txid key , sharedKeyToTxId = IntMap.singleton k txid @@ -938,7 +942,8 @@ prop_nextPeerAction_claimsExpiredLease (Positive oldOwner0) (Positive peerA0) (P , txAttempts = Map.empty } } - 
peerState0 = emptyPeerTxLocalState { peerAvailableTxIds = IntMap.singleton k txSize } + peerState0 = emptyPeerTxLocalState { peerAvailableTxIds = IntMap.singleton k txSize + , peerScore = peerAScore } (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peerA peerState0 sharedState0 -- Verifies that nextPeerAction still requests an oversized first tx when it @@ -972,7 +977,7 @@ prop_nextPeerAction_requestsOversizedFirstTx (Positive peeraddr) txid0 (Positive } sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr - (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) + (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) , sharedTxIdToKey = Map.singleton txid key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 @@ -1014,9 +1019,9 @@ unit_nextPeerAction_skipsBlockedAvailableTxs step = do sharedState = emptySharedTxState { sharedPeers = Map.fromList [ (peeraddr, withAdvertisedTxKeys [blockedKey, claimableKey] - (mkSharedPeerState PeerIdle (emptyPeerScore testNow))) + (mkSharedPeerState PeerIdle)) , (otherPeer, withAdvertisedTxKeys [blockedKey] - (mkSharedPeerState PeerIdle (emptyPeerScore testNow))) + (mkSharedPeerState PeerIdle)) ] , sharedTxTable = IntMap.fromList [ (kBlocked, TxEntry @@ -1059,7 +1064,7 @@ prop_nextPeerAction_ownerSubmitsBuffered (Positive peeraddr) txid0 txSize0 = sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr - (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) + (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxClaimable (Time 0) , txAdvertiserCount = 1 @@ -1116,9 +1121,9 @@ unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx step = do sharedState0 = emptySharedTxState { sharedPeers = Map.fromList [ (peeraddr, withAdvertisedTxKeys [blockedKey, claimableKey] - (mkSharedPeerState PeerIdle (emptyPeerScore now))) + 
(mkSharedPeerState PeerIdle)) , (submittingPeer, withAdvertisedTxKeys [blockedKey] - (mkSharedPeerState PeerSubmittingToMempool (emptyPeerScore now))) + (mkSharedPeerState PeerSubmittingToMempool)) ] , sharedTxTable = IntMap.fromList [ (kBlocked, blockedEntry) @@ -1187,9 +1192,9 @@ unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx step = do sharedState0 = emptySharedTxState { sharedPeers = Map.fromList [ (peeraddr, withAdvertisedTxKeys [blockedKey] - (mkSharedPeerState PeerIdle (emptyPeerScore now))) + (mkSharedPeerState PeerIdle)) , (submittingPeer, withAdvertisedTxKeys [blockedKey] - (mkSharedPeerState PeerSubmittingToMempool (emptyPeerScore now))) + (mkSharedPeerState PeerSubmittingToMempool)) ] , sharedTxTable = IntMap.singleton kBlocked blockedEntry , sharedRetainedTxs = retainedSingleton kResolved (addTime 17 now) @@ -1246,8 +1251,8 @@ prop_nextPeerAction_nonOwnerWaitsUntilResolved (Positive owner0) (Positive peera key = TxKey 0 k = unTxKey key sharedPeers0 = Map.fromList - [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) - , (peeraddr, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) + [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + , (peeraddr, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) ] unresolvedSharedState = emptySharedTxState { sharedPeers = sharedPeers0 @@ -1305,7 +1310,7 @@ prop_nextPeerActionPipelined_requiresAckAndReq (Positive peeraddr) txid0 _txSize , peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy } sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) , sharedRetainedTxs = retainedSingleton k (addTime 17 now) , sharedTxIdToKey = Map.singleton txid key , sharedKeyToTxId = IntMap.singleton k txid @@ -1344,7 +1349,7 @@ prop_nextPeerActionPipelined_requestsTxIds (Positive peeraddr) 
txid0 _txSize0 = , peerRequestedTxIds = 0 } sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) , sharedRetainedTxs = retainedSingleton k (addTime 17 now) , sharedTxIdToKey = Map.singleton txid key , sharedKeyToTxId = IntMap.singleton k txid @@ -1386,7 +1391,7 @@ unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply step = do , peerRequestedTxsSize = requestedTxBatchSize requestedBatch } sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) , sharedRetainedTxs = retainedFromList [ (unTxKey keyA, addTime 17 now) @@ -1448,7 +1453,7 @@ prop_nextPeerActionPipelined_secondBodyBatch (Positive peeraddr) txidA0 txidB0 t sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr - (withAdvertisedTxKeys [keyA, keyB] (mkSharedPeerState PeerIdle (emptyPeerScore now))) + (withAdvertisedTxKeys [keyA, keyB] (mkSharedPeerState PeerIdle)) , sharedTxTable = IntMap.fromList [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading)) , (kB, TxEntry @@ -1511,7 +1516,7 @@ prop_nextPeerActionPipelined_noThirdBodyBatch (Positive peeraddr) txidA0 txidB0 sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr - (withAdvertisedTxKeys [keyA, keyB, keyC] (mkSharedPeerState PeerIdle (emptyPeerScore now))) + (withAdvertisedTxKeys [keyA, keyB, keyC] (mkSharedPeerState PeerIdle)) , sharedTxTable = IntMap.fromList [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading)) , (kB, mkTxEntry peeraddr txSizeB (Just TxDownloading)) @@ -1553,7 +1558,7 @@ prop_nextPeerAction_prunesExpiredRetained (Positive peeraddr) txid0 _txSize0 = idlePeerState :: PeerTxLocalState (Tx TxId) idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy } 
sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) , sharedRetainedTxs = retainedSingleton k now , sharedTxIdToKey = Map.singleton txid key , sharedKeyToTxId = IntMap.singleton k txid @@ -1589,7 +1594,7 @@ prop_nextPeerAction_keepsRetained (Positive peeraddr) txid0 _txSize0 = idlePeerState :: PeerTxLocalState (Tx TxId) idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy } sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle (emptyPeerScore now)) + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) , sharedRetainedTxs = retainedSingleton k retainUntil , sharedTxIdToKey = Map.singleton txid key , sharedKeyToTxId = IntMap.singleton k txid @@ -1629,8 +1634,8 @@ prop_nextPeerAction_earliestWakeDelay (Positive peeraddr) (Positive owner0) txid keyB = TxKey 1 idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy } sharedPeers0 = Map.fromList - [ (peeraddr, withAdvertisedTxKeys [keyA] (mkSharedPeerState PeerIdle (emptyPeerScore now))) - , (owner, withAdvertisedTxKeys [keyA] (mkSharedPeerState PeerWaitingTxs (emptyPeerScore now))) + [ (peeraddr, withAdvertisedTxKeys [keyA] (mkSharedPeerState PeerIdle)) + , (owner, withAdvertisedTxKeys [keyA] (mkSharedPeerState PeerWaitingTxs)) ] leaseFirstState = emptySharedTxState { sharedPeers = sharedPeers0 @@ -1674,12 +1679,12 @@ prop_nextPeerAction_returnsPeerGeneration (Positive peeraddr) = sharedState0 = emptySharedTxState { sharedPeers = Map.fromList [ ( peeraddr - , (mkSharedPeerState PeerIdle (emptyPeerScore now)) + , (mkSharedPeerState PeerIdle) { sharedPeerGeneration = expectedGeneration } ) , ( peeraddr + 1000 - , (mkSharedPeerState PeerIdle (emptyPeerScore now)) + , (mkSharedPeerState PeerIdle) { sharedPeerGeneration 
= 11 } ) @@ -1715,9 +1720,9 @@ prop_handleSubmittedTxs_bumpsIdleAdvertisers (Positive owner0) (Positive peerA0) k = unTxKey key sharedState0 = emptySharedTxState { sharedPeers = Map.fromList - [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerSubmittingToMempool (emptyPeerScore now))) - , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) - , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerWaitingTxs (emptyPeerScore now))) + [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerSubmittingToMempool)) + , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerWaitingTxs)) ] , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxLeased owner (addTime 10 now) @@ -1751,10 +1756,10 @@ unit_advertisingPeersForTxExcept_scansAuthoritativePeerSets step = do k = unTxKey key sharedState0 = baseState { sharedPeers = Map.fromList - [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) - , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) - , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) - , (unrelatedPeer, mkSharedPeerState PeerIdle (emptyPeerScore now)) + [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + , (unrelatedPeer, mkSharedPeerState PeerIdle) ] , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxLeased owner (addTime 10 now) @@ -1789,10 +1794,10 @@ unit_removeAdvertisingPeersForResolvedTx_clearsAllAdvertisingPeers step = do k = unTxKey key sharedState0 = baseState { sharedPeers = Map.fromList - [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) - , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore 
now))) - , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) - , (unrelatedPeer, mkSharedPeerState PeerIdle (emptyPeerScore now)) + [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + , (unrelatedPeer, mkSharedPeerState PeerIdle) ] , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxLeased owner (addTime 10 now) @@ -1813,11 +1818,11 @@ unit_updatePeerPhase_wakesOnlyBecomingIdlePeer step = do other = 2 sharedState0 = emptySharedTxState { sharedPeers = Map.fromList - [ (peer, (mkSharedPeerState PeerWaitingTxs (emptyPeerScore now)) { sharedPeerGeneration = 5 }) - , (other, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 11 }) + [ (peer, (mkSharedPeerState PeerWaitingTxs) { sharedPeerGeneration = 5 }) + , (other, (mkSharedPeerState PeerIdle) { sharedPeerGeneration = 11 }) ] } - sharedState' = updatePeerPhase now defaultTxDecisionPolicy peer PeerIdle sharedState0 + sharedState' = updatePeerPhase peer PeerIdle sharedState0 unit_updatePeerPhase_wakesCompetingAdvertisers :: (String -> IO ()) -> Assertion unit_updatePeerPhase_wakesCompetingAdvertisers step = do @@ -1837,9 +1842,9 @@ unit_updatePeerPhase_wakesCompetingAdvertisers step = do k = unTxKey key sharedState0 = baseState { sharedPeers = Map.fromList - [ (leavingPeer, (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) { sharedPeerGeneration = 5 }) - , (competingPeer, (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle (emptyPeerScore now))) { sharedPeerGeneration = 11 }) - , (unrelatedPeer, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { sharedPeerGeneration = 17 }) + [ (leavingPeer, (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) { sharedPeerGeneration = 5 }) + , (competingPeer, (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) { 
sharedPeerGeneration = 11 }) + , (unrelatedPeer, (mkSharedPeerState PeerIdle) { sharedPeerGeneration = 17 }) ] , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxClaimable (Time 0) @@ -1847,18 +1852,15 @@ unit_updatePeerPhase_wakesCompetingAdvertisers step = do , txAttempts = Map.empty } } - sharedState' = updatePeerPhase now defaultTxDecisionPolicy leavingPeer PeerWaitingTxs sharedState0 + sharedState' = updatePeerPhase leavingPeer PeerWaitingTxs sharedState0 -- Generate a shared peer state. genSharedPeerState :: Gen SharedPeerState genSharedPeerState = do sharedPeerPhase <- elements [PeerIdle, PeerWaitingTxIds, PeerWaitingTxs, PeerSubmittingToMempool] - peerScoreValue <- choose (0 :: Double, 100) - peerScoreTs <- genSmallTime sharedPeerGeneration <- genSmallWord64 pure SharedPeerState { sharedPeerPhase, - sharedPeerScore = PeerScore peerScoreValue peerScoreTs, sharedPeerAdvertisedTxKeys = IntSet.empty, sharedPeerGeneration } @@ -1906,6 +1908,8 @@ genPeerTxLocalState = sized $ \n -> do peerDownloadedTxs <- IntMap.fromList <$> mapM genDownloadedTx downloadedKeys + peerScoreValue <- choose (0 :: Double, 100) + peerScoreTs <- genSmallTime pure PeerTxLocalState { peerUnacknowledgedTxIds, peerAvailableTxIds, @@ -1914,7 +1918,8 @@ genPeerTxLocalState = sized $ \n -> do peerRequestedTxsSize, peerRequestedTxIds, peerDownloadedTxs, - peerDownloadStartTime = Nothing + peerDownloadStartTime = Nothing, + peerScore = PeerScore peerScoreValue peerScoreTs } where genAvailableTx key = do @@ -1926,8 +1931,7 @@ genPeerTxLocalState = sized $ \n -> do pure (unTxKey key, mkTx (txIdForKey key) txSize) data PeerSeed = PeerSeed { - peerSeedScore :: !PeerScore - , peerSeedGeneration :: !Word64 + peerSeedGeneration :: !Word64 } data PeerDerivedUsage = PeerDerivedUsage { @@ -1962,15 +1966,8 @@ genSharedTxState = sized $ \n -> do pure $ buildSharedTxState peerSeeds activeEntries retainedEntries sharedGeneration where genPeerSeedEntry peeraddr = do - peerScoreValue <- choose (0 :: 
Double, 100) - peerScoreTs <- genSmallTime peerSeedGeneration <- genSmallWord64 - pure ( peeraddr - , PeerSeed { - peerSeedScore = PeerScore peerScoreValue peerScoreTs, - peerSeedGeneration - } - ) + pure (peeraddr, PeerSeed { peerSeedGeneration }) genRetainedEntry txid = do retainUntil <- genSharedExpiryTime @@ -2085,7 +2082,7 @@ deriveSharedPeers baseState peerSeeds activeEntries = addMissingPeerSeed acc peeraddr = Map.insertWith (\_ old -> old) peeraddr defaultPeerSeed acc - buildPeerState peeraddr PeerSeed { peerSeedScore, peerSeedGeneration } = + buildPeerState peeraddr PeerSeed { peerSeedGeneration } = let PeerDerivedUsage { peerHasSubmitting, peerHasRequestedTxs @@ -2096,17 +2093,13 @@ deriveSharedPeers baseState peerSeeds activeEntries = | otherwise = PeerIdle in SharedPeerState { sharedPeerPhase, - sharedPeerScore = peerSeedScore, sharedPeerAdvertisedTxKeys = Map.findWithDefault IntSet.empty peeraddr peerAdvertisedKeys, sharedPeerGeneration = peerSeedGeneration } defaultPeerSeed = - PeerSeed { - peerSeedScore = emptyPeerScore now, - peerSeedGeneration = 0 - } + PeerSeed { peerSeedGeneration = 0 } -- Default derived usage for a peer with no active work. emptyPeerDerivedUsage :: PeerDerivedUsage @@ -2192,11 +2185,8 @@ shrinkSharedTxState sharedState = ] peerSeeds = Map.map - (\SharedPeerState { sharedPeerScore, sharedPeerGeneration } -> - PeerSeed { - peerSeedScore = sharedPeerScore, - peerSeedGeneration = sharedPeerGeneration - }) + (\SharedPeerState { sharedPeerGeneration } -> + PeerSeed { peerSeedGeneration = sharedPeerGeneration }) (sharedPeers sharedState) usedPeers = foldl' (\peers activeEntry -> peers <> entryPeers activeEntry) [] activeEntries @@ -2319,11 +2309,10 @@ mkTx txid txSize = Tx } -- Construct a peer fixture with zeroed generation. 
-mkSharedPeerState :: PeerPhase -> PeerScore -> SharedPeerState -mkSharedPeerState sharedPeerPhase sharedPeerScore = +mkSharedPeerState :: PeerPhase -> SharedPeerState +mkSharedPeerState sharedPeerPhase = SharedPeerState { sharedPeerPhase, - sharedPeerScore, sharedPeerAdvertisedTxKeys = IntSet.empty, sharedPeerGeneration = 0 } @@ -2348,7 +2337,7 @@ ensurePeerAdvertisesTxKeys peeraddr txKeys st@SharedTxState { sharedPeers } = advertisedKeys = IntSet.fromList (map unTxKey txKeys) updatePeer Nothing = - Just (withAdvertisedTxKeys txKeys (mkSharedPeerState PeerIdle (emptyPeerScore now))) + Just (withAdvertisedTxKeys txKeys (mkSharedPeerState PeerIdle)) updatePeer (Just sharedPeerState) = Just (sharedPeerState { @@ -2596,7 +2585,7 @@ mkActiveSharedState allPeers ownerPeer resolvedAdvertisers txidsAndSizes = ] , sharedPeers = Map.fromList - [ (peeraddr, (mkSharedPeerState PeerIdle (emptyPeerScore now)) { + [ (peeraddr, (mkSharedPeerState PeerIdle) { sharedPeerAdvertisedTxKeys = Map.findWithDefault IntSet.empty peeraddr peerAdvertisedKeys }) From 5549a2a4905b6ad54a3a397aaef507f4e8081c08 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Tue, 21 Apr 2026 09:19:33 +0200 Subject: [PATCH 32/67] WIP: HasRawTxId instance The provided Eq/Ord instance for txid allocates memory. HasRawTxId lets the network layer convert to a type without those problems for our internal data structures. XXX Waiting on feedback from consensus. 
--- .../api/lib/Ouroboros/Network/Tx.hs | 29 +++++++ .../TxSubmission/Inbound/V2/Registry.hs | 12 +-- .../Network/TxSubmission/Inbound/V2/State.hs | 38 ++++----- .../Network/TxSubmission/Inbound/V2/Types.hs | 31 ++++---- ouroboros-network/ouroboros-network.cabal | 1 + .../Ouroboros/Network/TxSubmission/TxLogic.hs | 77 ++++++++++--------- .../Ouroboros/Network/TxSubmission/Types.hs | 7 ++ 7 files changed, 122 insertions(+), 73 deletions(-) create mode 100644 ouroboros-network/api/lib/Ouroboros/Network/Tx.hs diff --git a/ouroboros-network/api/lib/Ouroboros/Network/Tx.hs b/ouroboros-network/api/lib/Ouroboros/Network/Tx.hs new file mode 100644 index 00000000000..a51052e68a1 --- /dev/null +++ b/ouroboros-network/api/lib/Ouroboros/Network/Tx.hs @@ -0,0 +1,29 @@ +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} + +-- | Abstract view over transactions +-- +-- The network layer does not make any concrete assumptions about what +-- transactions or transaction identifiers look like. +module Ouroboros.Network.Tx + ( HasRawTxId (..) + , RawTxId (..) + ) where + +import Control.DeepSeq (NFData) +import Data.ByteString.Short (ShortByteString) +import NoThunks.Class (NoThunks) + +-- | Raw byte representation of a transaction identifier. +newtype RawTxId = RawTxId ShortByteString + deriving newtype (Eq, Ord, Show, NFData, NoThunks) + +-- | Abstract over transaction identifiers, providing access to their raw byte +-- representation for efficient comparison. 
+-- +-- Laws: +-- +-- * If @getRawTxId x == getRawTxId y@ then @x == y@ +-- (the raw bytes must uniquely identify the transaction) +class HasRawTxId txid where + getRawTxId :: txid -> RawTxId diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index 6cdee8862ac..0ea0b38b07d 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -29,6 +29,7 @@ import Data.Void (Void) import Data.Word (Word64) import Ouroboros.Network.Protocol.TxSubmission2.Type +import Ouroboros.Network.Tx (HasRawTxId) import Ouroboros.Network.TxSubmission.Inbound.V2.Policy (TxDecisionPolicy (..)) import Ouroboros.Network.TxSubmission.Inbound.V2.State qualified as State import Ouroboros.Network.TxSubmission.Inbound.V2.Types @@ -138,6 +139,7 @@ withPeer , MonadTimer m , Ord peeraddr , Ord txid + , HasRawTxId txid ) => TxDecisionPolicy -> TxSubmissionMempoolReader txid tx idx m @@ -315,7 +317,7 @@ updateCountersForAction countersVar peerAction = -- Called from the main peer loop when not handling pipelined replies. runNextPeerActionImp :: ( MonadSTM m , Ord peeraddr - , Ord txid ) + , HasRawTxId txid ) => TxDecisionPolicy -> SharedTxStateVar m peeraddr txid -> TxSubmissionCountersVar m @@ -341,7 +343,7 @@ runNextPeerActionImp policy sharedStateVar countersVar peeraddr now peerState = -- pipelined protocol replies. runNextPeerActionPipelinedImp :: ( MonadSTM m , Ord peeraddr - , Ord txid ) + , HasRawTxId txid ) => TxDecisionPolicy -> SharedTxStateVar m peeraddr txid -> TxSubmissionCountersVar m @@ -370,7 +372,7 @@ runNextPeerActionPipelinedImp policy sharedStateVar countersVar peeraddr now pee -- peer-local state. 
applyReceivedTxIdsImp :: ( MonadSTM m , Ord peeraddr - , Ord txid ) + , HasRawTxId txid ) => TxDecisionPolicy -> STM m (MempoolSnapshot txid tx idx) -> SharedTxStateVar m peeraddr txid @@ -401,7 +403,7 @@ applyReceivedTxIdsImp policy mempoolGetSnapshot sharedStateVar countersVar peera -- missing from the reply, together with the updated peer-local state. applyReceivedTxsImp :: ( MonadSTM m , Ord peeraddr - , Ord txid ) + , HasRawTxId txid ) => TxDecisionPolicy -> STM m (MempoolSnapshot txid tx idx) -> SharedTxStateVar m peeraddr txid @@ -435,7 +437,7 @@ applyReceivedTxsImp policy mempoolGetSnapshot sharedStateVar countersVar peeradd -- Returns updated peer-local state. applySubmittedTxsImp :: ( MonadSTM m , Ord peeraddr - , Ord txid ) + , HasRawTxId txid ) => TxDecisionPolicy -> SharedTxStateVar m peeraddr txid -> TxSubmissionCountersVar m diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index bd6868bc3de..73cc8879815 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -27,6 +27,7 @@ import Data.Word (Word64) import Ouroboros.Network.Protocol.TxSubmission2.Type (NumTxIdsToAck, SizeInBytes) +import Ouroboros.Network.Tx (HasRawTxId (..)) import Ouroboros.Network.TxSubmission.Inbound.V2.Policy import Ouroboros.Network.TxSubmission.Inbound.V2.Types @@ -61,7 +62,7 @@ data PeerActionChoice peeraddr = -- | Build a precomputed context for selecting the next action for a peer. -- -- -mkPeerActionContext :: (Ord peeraddr, Ord txid) +mkPeerActionContext :: (Ord peeraddr, HasRawTxId txid) => Time -> TxDecisionPolicy -> peeraddr @@ -108,7 +109,7 @@ mkPeerActionContext now policy peeraddr peerState sharedState = error "TxSubmission.V2.mkPeerActionContext: missing peer" -- | Compute the next peer-local action. 
-nextPeerAction :: (Ord peeraddr, Ord txid) +nextPeerAction :: (Ord peeraddr, HasRawTxId txid) => Time -> TxDecisionPolicy -> peeraddr @@ -118,7 +119,7 @@ nextPeerAction :: (Ord peeraddr, Ord txid) nextPeerAction = nextPeerActionWithMode AllowAnyTxIdRequests -- | Pipelined version of nextPeerAction -nextPeerActionPipelined :: (Ord peeraddr, Ord txid) +nextPeerActionPipelined :: (Ord peeraddr, HasRawTxId txid) => Time -> TxDecisionPolicy -> peeraddr @@ -132,7 +133,7 @@ nextPeerActionPipelined = nextPeerActionWithMode AllowPipelinedTxIdRequests -- nextPeerActionWithMode handles body requests for txs this peer may currently -- fetch, tx submission for bodies buffered locally by this peer, and txid ack/request -- messages. -nextPeerActionWithMode :: (Ord peeraddr, Ord txid) +nextPeerActionWithMode :: (Ord peeraddr, HasRawTxId txid) => TxIdRequestMode -> Time -> TxDecisionPolicy @@ -171,7 +172,7 @@ pickPeerActionChoice txIdRequestMode ctx ChooseDoNothing (peerGenerationOf (pacPeerAddr ctx) (pacSharedState ctx)) (nextWakeDelay ctx) -- | Execute a chosen peer action and compute resulting state updates -applyPeerActionChoice :: (Ord peeraddr, Ord txid) +applyPeerActionChoice :: (Ord peeraddr, HasRawTxId txid) => PeerActionContext peeraddr txid tx -> PeerActionChoice peeraddr -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) @@ -234,7 +235,7 @@ applyRequestTxsChoice ctx txsToRequest txsToRequestSize txTable = -- | Construct a 'PeerRequestTxIds' action and update local and shared txid state. applyRequestTxIdsChoice - :: (Ord peeraddr, Ord txid) + :: (Ord peeraddr, HasRawTxId txid) => PeerActionContext peeraddr txid tx -> TxIdsReqFlavour -> [TxKey] @@ -616,7 +617,7 @@ updatePeerAdvertisedTxKeys peeraddr updateKeys st@SharedTxState { sharedPeers } error "TxSubmission.V2.updatePeerAdvertisedTxKeys: missing peer" -- | Acknowledge txids from a peer and update shared state. 
-acknowledgeTxIds :: (Ord peeraddr, Ord txid) +acknowledgeTxIds :: (Ord peeraddr, HasRawTxId txid) => peeraddr -> [TxKey] -> SharedTxState peeraddr txid @@ -680,7 +681,7 @@ txIdAckable PeerActionContext { pacPeerAddr, pacPeerState, pacSharedPeerState, p Nothing -> True -- Safe late ack after the resolved tx was pruned from shared state. -- | Remove one transaction entry from all shared state maps by key. -dropTxKey :: Ord txid +dropTxKey :: HasRawTxId txid => Int -> SharedTxState peeraddr txid -> SharedTxState peeraddr txid @@ -695,11 +696,11 @@ dropTxKey k st@SharedTxState { sharedTxTable, sharedRetainedTxs, sharedTxIdToKey where deleteTxId txIdToKey = case IntMap.lookup k sharedKeyToTxId of - Just txid -> Map.delete txid txIdToKey + Just txid -> Map.delete (getRawTxId txid) txIdToKey Nothing -> txIdToKey -- | Remove transaction entries from all shared state maps by key. -dropTxKeys :: Ord txid +dropTxKeys :: HasRawTxId txid => IntSet.IntSet -> SharedTxState peeraddr txid -> SharedTxState peeraddr txid @@ -716,11 +717,11 @@ dropTxKeys keys st@SharedTxState { sharedTxTable, sharedRetainedTxs, sharedTxIdT where deleteTxId txIdToKey k = case IntMap.lookup k sharedKeyToTxId of - Just txid -> Map.delete txid txIdToKey + Just txid -> Map.delete (getRawTxId txid) txIdToKey Nothing -> txIdToKey -- | Remove transaction keys that are no longer active from the shared state. -dropDeadActiveKeys :: Ord txid +dropDeadActiveKeys :: HasRawTxId txid => IntSet.IntSet -> SharedTxState peeraddr txid -> SharedTxState peeraddr txid @@ -857,7 +858,7 @@ removeAdvertisingPeersForResolvedTxExcept currentPeer txKey@(TxKey k) st@SharedT -- already retained or already in the mempool are counted as late and dropped. -- Any requested tx omitted from the reply releases this peer's ownership. -- Late TXs contributes to the returned penalty count. 
-handleReceivedTxs :: (Ord peeraddr, Ord txid) +handleReceivedTxs :: (Ord peeraddr, HasRawTxId txid) => (txid -> Bool) -> Time -> TxDecisionPolicy @@ -951,7 +952,7 @@ handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState = , downloadedAcc ) (txid, tx) = - case Map.lookup txid txidToKey of + case Map.lookup (getRawTxId txid) txidToKey of Nothing -> ( lateCountAcc + 1 , pendingKeysAcc @@ -1065,7 +1066,7 @@ handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState = -- txid advertisements can be acknowledged without re-requesting the body. -- Txs rejected by the mempool release this peer's attempt state and advertiser -- slot so another advertiser may try later. -handleSubmittedTxs :: (Ord peeraddr, Ord txid) +handleSubmittedTxs :: (Ord peeraddr, HasRawTxId txid) => Time -> TxDecisionPolicy -> peeraddr @@ -1194,7 +1195,7 @@ markSubmittingTxs peeraddr txKeys st = -- advertises the txid may claim it once its score-derived delay has elapsed, -- which avoids pinning fresh work to the first peer that happened to announce -- it. -handleReceivedTxIds :: forall peeraddr txid tx. (Ord peeraddr, Ord txid) +handleReceivedTxIds :: forall peeraddr txid tx. 
(Ord peeraddr, HasRawTxId txid) => (txid -> Bool) -> Time -> TxDecisionPolicy @@ -1372,14 +1373,15 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize ) lookupOrInternTxId txid st@SharedTxState { sharedTxIdToKey, sharedKeyToTxId, sharedNextTxKey } - | Just key <- Map.lookup txid sharedTxIdToKey = (key, False, st) + | Just key <- Map.lookup rawId sharedTxIdToKey = (key, False, st) | otherwise = let key = TxKey sharedNextTxKey in ( key , True , st { - sharedTxIdToKey = Map.insert txid key sharedTxIdToKey, + sharedTxIdToKey = Map.insert rawId key sharedTxIdToKey, sharedKeyToTxId = IntMap.insert sharedNextTxKey txid sharedKeyToTxId, sharedNextTxKey = sharedNextTxKey + 1 } ) + where rawId = getRawTxId txid diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index 20a9878454e..c8116d29416 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -86,6 +86,7 @@ import Data.Typeable (Typeable, eqT, (:~:) (Refl)) import GHC.Generics (Generic) import Ouroboros.Network.Protocol.TxSubmission2.Type +import Ouroboros.Network.Tx (HasRawTxId (..), RawTxId) import Data.Foldable (foldl') import Data.IntMap.Strict (IntMap) @@ -441,7 +442,7 @@ data SharedTxState peeraddr txid = SharedTxState { -- | Accepted txs retained locally for a bounded time so later txid -- advertisements can be acked without re-requesting the body. 
sharedRetainedTxs :: !RetainedTxs, - sharedTxIdToKey :: !(Map txid TxKey), + sharedTxIdToKey :: !(Map RawTxId TxKey), sharedKeyToTxId :: !(IntMap txid), sharedNextTxKey :: !Int, sharedGeneration :: !Word64 @@ -581,11 +582,12 @@ bumpIdlePeerGenerations peersToWake st@SharedTxState { sharedPeers } = sharedPeerState { sharedPeerGeneration = sharedPeerGeneration + 1 } | otherwise = sharedPeerState -lookupTxKey :: Ord txid +lookupTxKey :: HasRawTxId txid => txid -> SharedTxState peeraddr txid -> Maybe TxKey -lookupTxKey txid SharedTxState { sharedTxIdToKey } = Map.lookup txid sharedTxIdToKey +lookupTxKey txid SharedTxState { sharedTxIdToKey } = + Map.lookup (getRawTxId txid) sharedTxIdToKey resolveTxKey :: SharedTxState peeraddr txid -> TxKey @@ -595,30 +597,33 @@ resolveTxKey SharedTxState { sharedKeyToTxId } (TxKey k) = Just txid -> txid Nothing -> error "TxSubmission.V2.resolveTxKey: missing tx key" -internTxId :: Ord txid +internTxId :: HasRawTxId txid => txid -> SharedTxState peeraddr txid - -> (TxKey, SharedTxState peeraddr txid) + -> (RawTxId, TxKey, SharedTxState peeraddr txid) internTxId txid st@SharedTxState { sharedTxIdToKey, sharedKeyToTxId, sharedNextTxKey } - | Just key <- Map.lookup txid sharedTxIdToKey = (key, st) + | Just key <- Map.lookup rawId sharedTxIdToKey = (rawId, key, st) | otherwise = - let key = TxKey sharedNextTxKey - in ( key - , st { sharedTxIdToKey = Map.insert txid key sharedTxIdToKey + let key = TxKey sharedNextTxKey in + ( rawId + , key + , st { sharedTxIdToKey = Map.insert rawId key sharedTxIdToKey , sharedKeyToTxId = IntMap.insert sharedNextTxKey txid sharedKeyToTxId , sharedNextTxKey = sharedNextTxKey + 1 } ) + where + rawId = getRawTxId txid -internTxIds :: (Foldable f, Ord txid) +internTxIds :: (Foldable f, HasRawTxId txid) => f txid -> SharedTxState peeraddr txid - -> (Map txid TxKey, SharedTxState peeraddr txid) + -> (Map RawTxId TxKey, SharedTxState peeraddr txid) internTxIds txids st0 = foldl' step (Map.empty, st0) txids where 
step (acc, st) txid = - let (key, st') = internTxId txid st - in (Map.insert txid key acc, st') + let (rawId, key, st') = internTxId txid st in + (Map.insert rawId key acc, st') -- | Flag to enable/disable the usage of the new tx-submission logic. -- diff --git a/ouroboros-network/ouroboros-network.cabal b/ouroboros-network/ouroboros-network.cabal index d0de7b4e22d..472f531752d 100644 --- a/ouroboros-network/ouroboros-network.cabal +++ b/ouroboros-network/ouroboros-network.cabal @@ -103,6 +103,7 @@ library api Ouroboros.Network.Point Ouroboros.Network.Protocol.Limits Ouroboros.Network.SizeInBytes + Ouroboros.Network.Tx Ouroboros.Network.Util.ShowProxy build-depends: diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 4a456dfd4dd..d64acdbe614 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -41,6 +41,7 @@ import NoThunks.Class (NoThunks, unsafeNoThunks) import Ouroboros.Network.Protocol.TxSubmission2.Type +import Ouroboros.Network.Tx (HasRawTxId (..), RawTxId, getRawTxId) import Ouroboros.Network.TxSubmission.Inbound.V2.Policy import Ouroboros.Network.TxSubmission.Inbound.V2.Registry (updatePeerPhase) import Ouroboros.Network.TxSubmission.Inbound.V2.State @@ -161,6 +162,7 @@ sharedTxStateInvariant :: forall peeraddr txid. 
( Ord peeraddr , Ord txid + , HasRawTxId txid , Show peeraddr , Show txid ) @@ -201,11 +203,11 @@ sharedTxStateInvariant strength SharedTxState { knownPeers = Map.keysSet sharedPeers keysRoundTripForward = - all (\(txid, txKey) -> IntMap.lookup (unTxKey txKey) sharedKeyToTxId == Just txid) + all (\(rawId, txKey) -> fmap getRawTxId (IntMap.lookup (unTxKey txKey) sharedKeyToTxId) == Just rawId) (Map.toList sharedTxIdToKey) keysRoundTripBackward = - all (\(k, txid) -> Map.lookup txid sharedTxIdToKey == Just (TxKey k)) + all (\(k, txid) -> Map.lookup (getRawTxId txid) sharedTxIdToKey == Just (TxKey k)) (IntMap.toList sharedKeyToTxId) advertisersForKey k = @@ -357,8 +359,8 @@ prop_handleReceivedTxIds_newEntries (Positive peeraddr) (ArbSharedTxState shared `IntSet.union` IntSet.fromList [ unTxKey (lookupKeyOrFail txid sharedState') | (txid, _) <- txidsAndSizes ] - checkExistingTxId (txid, txKey) = - Map.lookup txid (sharedTxIdToKey sharedState') === Just txKey + checkExistingTxId (rawId, txKey) = + Map.lookup rawId (sharedTxIdToKey sharedState') === Just txKey checkEntry (txid, _) = case IntMap.lookup (unTxKey (lookupKeyOrFail txid sharedState')) (sharedTxTable sharedState') of @@ -383,7 +385,7 @@ prop_handleReceivedTxIds_knownToMempool (Positive peeraddr) txid0 txSize0 = , toList (peerUnacknowledgedTxIds peerState') === [key] , IntMap.lookup (unTxKey key) (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) , retainedLookup (unTxKey key) (sharedRetainedTxs sharedState') === Just expectedRetainUntil - , Map.lookup txid (sharedTxIdToKey sharedState') === Just key + , Map.lookup (getRawTxId txid) (sharedTxIdToKey sharedState') === Just key , IntMap.lookup (unTxKey key) (sharedKeyToTxId sharedState') === Just txid , sharedGeneration sharedState' === 1 , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) @@ -426,7 +428,7 @@ prop_handleReceivedTxIds_retainedIsLocalOnly (Positive peeraddr) txid0 txSize0 = sharedState0 = 
ensurePeerAdvertisesTxKeys peeraddr [] $ emptySharedTxState { sharedRetainedTxs = retainedSingleton k retainUntil - , sharedTxIdToKey = Map.singleton txid key + , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 , sharedGeneration = 7 @@ -561,7 +563,7 @@ prop_handleReceivedTxs_buffersAndDropsOmitted (Positive peeraddr) txidA0 txidB0 , fmap (Map.lookup peeraddr . txAttempts) (IntMap.lookup kA (sharedTxTable sharedState')) === Just (Just TxBuffered) , IntMap.lookup kB (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) , retainedLookup kB (sharedRetainedTxs sharedState') === Nothing - , Map.lookup txidB (sharedTxIdToKey sharedState') === Nothing + , Map.lookup (getRawTxId txidB) (sharedTxIdToKey sharedState') === Nothing , IntMap.lookup kB (sharedKeyToTxId sharedState') === Nothing , sharedGeneration sharedState' === 1 , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) @@ -679,7 +681,7 @@ prop_handleReceivedTxs_penalizesOmittedAfterPrune (Positive peeraddr) txid0 txSi , peerDownloadedTxs peerState' === (IntMap.empty :: IntMap.IntMap (Tx TxId)) , sharedTxTable sharedState' === IntMap.empty , retainedLookup k (sharedRetainedTxs sharedState') === Nothing - , Map.lookup txid (sharedTxIdToKey sharedState') === Nothing + , Map.lookup (getRawTxId txid) (sharedTxIdToKey sharedState') === Nothing , IntMap.lookup k (sharedKeyToTxId sharedState') === Nothing , sharedGeneration sharedState' === 1 , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) @@ -703,6 +705,7 @@ prop_handleReceivedTxs_penalizesOmittedAfterPrune (Positive peeraddr) txid0 txSi , peerRequestedTxBatches = StrictSeq.singleton (mkRequestedTxBatch [key] txSize) , peerRequestedTxsSize = txSize } + sharedStatePruned :: SharedTxState PeerAddr TxId sharedStatePruned = sharedStateBase { sharedTxTable = IntMap.empty , sharedRetainedTxs = retainedEmpty @@ -727,11 +730,11 @@ 
prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected (Positive peeraddr) txid [ peerDownloadedTxs peerState' === IntMap.empty , IntMap.lookup kA (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) , retainedLookup kA (sharedRetainedTxs sharedState') === Just expectedRetainUntil - , Map.lookup txidA (sharedTxIdToKey sharedState') === Just keyA + , Map.lookup (getRawTxId txidA) (sharedTxIdToKey sharedState') === Just keyA , IntMap.lookup kA (sharedKeyToTxId sharedState') === Just txidA , IntMap.lookup kB (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) , retainedLookup kB (sharedRetainedTxs sharedState') === Nothing - , Map.lookup txidB (sharedTxIdToKey sharedState') === Nothing + , Map.lookup (getRawTxId txidB) (sharedTxIdToKey sharedState') === Nothing , IntMap.lookup kB (sharedKeyToTxId sharedState') === Nothing , sharedGeneration sharedState' === 1 , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) @@ -796,7 +799,7 @@ prop_nextPeerAction_prioritisesSubmit (Positive peeraddr) txid0 txSize0 = , txAdvertiserCount = 1 , txAttempts = Map.singleton peeraddr TxBuffered } - , sharedTxIdToKey = Map.singleton txid key + , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 } @@ -845,7 +848,7 @@ prop_nextPeerAction_claimsClaimableTx (Positive peerA0) (Positive peerB0) (Posit , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) , (peerC, withAdvertisedTxKeys [key] (mkSharedPeerState PeerWaitingTxs)) ] - , sharedTxIdToKey = Map.singleton txid key + , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 , sharedTxTable = IntMap.singleton k TxEntry @@ -881,7 +884,7 @@ unit_nextPeerAction_claimsAtScoreDelayThreshold step = do { sharedPeers = Map.singleton peeraddr (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , sharedTxIdToKey = Map.singleton txid key + , 
sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 , sharedTxTable = IntMap.singleton k TxEntry @@ -933,7 +936,7 @@ prop_nextPeerAction_claimsExpiredLease (Positive oldOwner0) (Positive peerA0) (P , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) ] - , sharedTxIdToKey = Map.singleton txid key + , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 , sharedTxTable = IntMap.singleton k TxEntry @@ -978,7 +981,7 @@ prop_nextPeerAction_requestsOversizedFirstTx (Positive peeraddr) txid0 (Positive sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , sharedTxIdToKey = Map.singleton txid key + , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 , sharedTxTable = IntMap.singleton k (mkTxEntry peeraddr txSize Nothing) @@ -1070,7 +1073,7 @@ prop_nextPeerAction_ownerSubmitsBuffered (Positive peeraddr) txid0 txSize0 = , txAdvertiserCount = 1 , txAttempts = Map.singleton peeraddr TxBuffered } - , sharedTxIdToKey = Map.singleton txid key + , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 } @@ -1134,8 +1137,8 @@ unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx step = do }) ] , sharedTxIdToKey = Map.fromList - [ (blockedTxid, blockedKey) - , (claimableTxid, claimableKey) + [ (getRawTxId blockedTxid, blockedKey) + , (getRawTxId claimableTxid, claimableKey) ] , sharedKeyToTxId = IntMap.fromList [ (kBlocked, blockedTxid) @@ -1199,8 +1202,8 @@ unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx step = do , sharedTxTable = IntMap.singleton kBlocked blockedEntry , sharedRetainedTxs = retainedSingleton kResolved (addTime 17 now) , 
sharedTxIdToKey = Map.fromList - [ (resolvedTxid, resolvedKey) - , (blockedTxid, blockedKey) + [ (getRawTxId resolvedTxid, resolvedKey) + , (getRawTxId blockedTxid, blockedKey) ] , sharedKeyToTxId = IntMap.fromList [ (kResolved, resolvedTxid) @@ -1261,7 +1264,7 @@ prop_nextPeerAction_nonOwnerWaitsUntilResolved (Positive owner0) (Positive peera , txAdvertiserCount = 2 , txAttempts = Map.singleton owner TxBuffered } - , sharedTxIdToKey = Map.singleton txid key + , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 } @@ -1312,7 +1315,7 @@ prop_nextPeerActionPipelined_requiresAckAndReq (Positive peeraddr) txid0 _txSize sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) , sharedRetainedTxs = retainedSingleton k (addTime 17 now) - , sharedTxIdToKey = Map.singleton txid key + , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 } @@ -1351,7 +1354,7 @@ prop_nextPeerActionPipelined_requestsTxIds (Positive peeraddr) txid0 _txSize0 = sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) , sharedRetainedTxs = retainedSingleton k (addTime 17 now) - , sharedTxIdToKey = Map.singleton txid key + , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 } @@ -1398,7 +1401,7 @@ unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply step = do , (unTxKey keyB, addTime 17 now) , (unTxKey keyC, addTime 17 now) ] - , sharedTxIdToKey = Map.fromList [(txidA, keyA), (txidB, keyB), (txidC, keyC)] + , sharedTxIdToKey = Map.fromList [(getRawTxId txidA, keyA), (getRawTxId txidB, keyB), (getRawTxId txidC, keyC)] , sharedKeyToTxId = IntMap.fromList [ (unTxKey keyA, txidA) @@ -1462,7 +1465,7 @@ prop_nextPeerActionPipelined_secondBodyBatch (Positive peeraddr) txidA0 txidB0 t , txAttempts = 
Map.empty }) ] - , sharedTxIdToKey = Map.fromList [(txidA, keyA), (txidB, keyB)] + , sharedTxIdToKey = Map.fromList [(getRawTxId txidA, keyA), (getRawTxId txidB, keyB)] , sharedKeyToTxId = IntMap.fromList [(kA, txidA), (kB, txidB)] , sharedNextTxKey = 2 } @@ -1526,7 +1529,7 @@ prop_nextPeerActionPipelined_noThirdBodyBatch (Positive peeraddr) txidA0 txidB0 , txAttempts = Map.empty }) ] - , sharedTxIdToKey = Map.fromList [(txidA, keyA), (txidB, keyB), (txidC, keyC)] + , sharedTxIdToKey = Map.fromList [(getRawTxId txidA, keyA), (getRawTxId txidB, keyB), (getRawTxId txidC, keyC)] , sharedKeyToTxId = IntMap.fromList [(kA, txidA), (kB, txidB), (kC, txidC)] , sharedNextTxKey = 3 } @@ -1546,7 +1549,7 @@ prop_nextPeerAction_prunesExpiredRetained (Positive peeraddr) txid0 _txSize0 = [ peerState' === idlePeerState , IntMap.lookup k (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) , retainedLookup k (sharedRetainedTxs sharedState') === Nothing - , Map.lookup txid (sharedTxIdToKey sharedState') === Nothing + , Map.lookup (getRawTxId txid) (sharedTxIdToKey sharedState') === Nothing , IntMap.lookup k (sharedKeyToTxId sharedState') === Nothing , sharedGeneration sharedState' === sharedGeneration sharedState0 + 1 ] @@ -1560,7 +1563,7 @@ prop_nextPeerAction_prunesExpiredRetained (Positive peeraddr) txid0 _txSize0 = sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) , sharedRetainedTxs = retainedSingleton k now - , sharedTxIdToKey = Map.singleton txid key + , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = max 1 (k + 1) } @@ -1580,7 +1583,7 @@ prop_nextPeerAction_keepsRetained (Positive peeraddr) txid0 _txSize0 = [ peerState' === idlePeerState , IntMap.lookup k (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) , retainedLookup k (sharedRetainedTxs sharedState') === Just retainUntil - , Map.lookup txid (sharedTxIdToKey 
sharedState') === Just key + , Map.lookup (getRawTxId txid) (sharedTxIdToKey sharedState') === Just key , IntMap.lookup k (sharedKeyToTxId sharedState') === Just txid , wakeDelay === diffTime retainUntil now , sharedGeneration sharedState' === sharedGeneration sharedState0 @@ -1596,7 +1599,7 @@ prop_nextPeerAction_keepsRetained (Positive peeraddr) txid0 _txSize0 = sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) , sharedRetainedTxs = retainedSingleton k retainUntil - , sharedTxIdToKey = Map.singleton txid key + , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 } @@ -1645,7 +1648,7 @@ prop_nextPeerAction_earliestWakeDelay (Positive peeraddr) (Positive owner0) txid , txAttempts = Map.empty } , sharedRetainedTxs = retainedSingleton (unTxKey keyB) retainUntilLater - , sharedTxIdToKey = Map.fromList [(txidA, keyA), (txidB, keyB)] + , sharedTxIdToKey = Map.fromList [(getRawTxId txidA, keyA), (getRawTxId txidB, keyB)] , sharedKeyToTxId = IntMap.fromList [(unTxKey keyA, txidA), (unTxKey keyB, txidB)] , sharedNextTxKey = 2 } @@ -1657,7 +1660,7 @@ prop_nextPeerAction_earliestWakeDelay (Positive peeraddr) (Positive owner0) txid , txAttempts = Map.empty } , sharedRetainedTxs = retainedSingleton (unTxKey keyB) retainUntilSoon - , sharedTxIdToKey = Map.fromList [(txidA, keyA), (txidB, keyB)] + , sharedTxIdToKey = Map.fromList [(getRawTxId txidA, keyA), (getRawTxId txidB, keyB)] , sharedKeyToTxId = IntMap.fromList [(unTxKey keyA, txidA), (unTxKey keyB, txidB)] , sharedNextTxKey = 2 } @@ -1729,7 +1732,7 @@ prop_handleSubmittedTxs_bumpsIdleAdvertisers (Positive owner0) (Positive peerA0) , txAdvertiserCount = 3 , txAttempts = Map.singleton owner TxBuffered } - , sharedTxIdToKey = Map.singleton txid key + , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 } @@ -2393,18 +2396,18 @@ 
dedupeBatch = nubBy ((==) `on` fst) freshBatchAgainstSharedState :: SharedTxState PeerAddr TxId -> [(TxId, SizeInBytes)] -> [(TxId, SizeInBytes)] freshBatchAgainstSharedState sharedState = reverse . snd . foldl' step (reserved, []) where - reserved = IntSet.fromList (Map.keys (sharedTxIdToKey sharedState)) + reserved = Set.fromList (Map.keys (sharedTxIdToKey sharedState)) step (used, acc) (txid, txSize) = let freshTxId = firstFreshTxId used txid in - (IntSet.insert freshTxId used, (freshTxId, txSize) : acc) + (Set.insert (getRawTxId freshTxId) used, (freshTxId, txSize) : acc) -- Find the first txid not present in the reserved set. -firstFreshTxId :: IntSet.IntSet -> TxId -> TxId +firstFreshTxId :: Set.Set RawTxId -> TxId -> TxId firstFreshTxId used = go where go txid - | IntSet.member txid used = go (txid + 1) + | Set.member (getRawTxId txid) used = go (txid + 1) | otherwise = txid mkReceiveDuplicateFixture :: Int -> Int -> ReceiveDuplicateFixture diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Types.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Types.hs index c53bba29b47..daf22597c05 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Types.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Types.hs @@ -1,3 +1,4 @@ +{-# OPTIONS_GHC -Wno-orphans #-} {-# LANGUAGE BangPatterns #-} {-# LANGUAGE BlockArguments #-} {-# LANGUAGE DeriveAnyClass #-} @@ -49,7 +50,9 @@ import Codec.CBOR.Decoding qualified as CBOR import Codec.CBOR.Encoding qualified as CBOR import Codec.CBOR.Read qualified as CBOR +import Data.Bits (shiftR) import Data.ByteString.Lazy (ByteString) +import Data.ByteString.Short qualified as SBS import Data.Either (partitionEithers) import Data.List qualified as List import Data.Sequence qualified as Seq @@ -61,6 +64,7 @@ import Network.TypedProtocol.Codec import Ouroboros.Network.Protocol.TxSubmission2.Codec import Ouroboros.Network.Protocol.TxSubmission2.Type 
+import Ouroboros.Network.Tx (HasRawTxId (..), RawTxId (..))
 import Ouroboros.Network.TxSubmission.Inbound.V1
 import Ouroboros.Network.TxSubmission.Mempool.Reader
 import Ouroboros.Network.TxSubmission.Mempool.Simple (Mempool)
@@ -116,6 +120,9 @@ maxTxSize = 65536
 
 type TxId = Int
 
+instance HasRawTxId Int where
+  getRawTxId n = RawTxId (SBS.pack [ fromIntegral (n `shiftR` (i * 8)) | i <- [7, 6 .. 0] ])
+
 emptyMempool :: MonadSTM m => m (Mempool m txid (Tx txid))
 emptyMempool = Mempool.empty
 

From eef6e8c6271f05443c7264d7f4832061377acd3f Mon Sep 17 00:00:00 2001
From: Karl Knutsson
Date: Tue, 21 Apr 2026 10:19:54 +0200
Subject: [PATCH 33/67] fixup: remove immoral check

Mark TXs as being submitted to the mempool in applySubmitChoice.
With this change the immoral check to see if a TX is already
in the mempool can be removed and we don't have to create an
extra snapshot.
---
 .../Network/TxSubmission/Inbound/V2.hs        | 32 +++----------------
 .../TxSubmission/Inbound/V2/Registry.hs       |  5 ---
 .../Network/TxSubmission/Inbound/V2/State.hs  |  8 +++--
 .../Ouroboros/Network/TxSubmission/TxLogic.hs |  8 +++--
 4 files changed, 17 insertions(+), 36 deletions(-)

diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs
index 5733118bb10..02a98f8afcd 100644
--- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs
+++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs
@@ -98,7 +98,7 @@ txSubmissionInboundV2
     tracer
     initDelay
     policy
-    TxSubmissionMempoolReader { mempoolGetSnapshot }
+    TxSubmissionMempoolReader {}
     TxSubmissionMempoolWriter { txId, mempoolAddTxs }
     txSize
     PeerTxAPI {
@@ -110,7 +110,6 @@ txSubmissionInboundV2
       applySubmittedTxs,
       resolveTxRequest,
       resolveBufferedTxs,
-      startSubmittingTxs,
       addCounters
     }
     =
    TxSubmissionServerPipelined $ do
@@ -157,19 +156,9 @@ txSubmissionInboundV2
    submitBufferedTxs txKeys k = StatefulM $ \peerState -> do
      bufferedTxs <-
resolveBufferedTxs peerState txKeys - -- Flags the txs as on the way to the mempool, which temporarily blocks further - -- download attempts. - startSubmittingTxs txKeys - start <- getMonotonicTime - MempoolSnapshot { mempoolHasTx } <- atomically mempoolGetSnapshot - - -- Note that checking if the mempool contains a TX before - -- spending several ms attempting to add it to the pool has - -- been judged immoral. - let (alreadyInMempool, pendingSubmit) = partitionBufferedTxs mempoolHasTx bufferedTxs - submitted = [ (txKey, txid') | (txKey, txid', _) <- pendingSubmit ] - toSubmit = [ tx | (_, _, tx) <- pendingSubmit ] + let submitted = [ (txKey, txid') | (txKey, txid', _) <- bufferedTxs ] + toSubmit = [ tx | (_, _, tx) <- bufferedTxs ] (acceptedTxIds, _) <- if null toSubmit then pure ([], []) @@ -178,8 +167,8 @@ txSubmissionInboundV2 let (acceptedTxs, rejectedTxs) = classifySubmittedTxs submitted (Set.fromList acceptedTxIds) - resolvedTxKeys = [ txKey | (txKey, _, _) <- alreadyInMempool ] <> fmap fst acceptedTxs - rejectedForTrace = [ txid' | (_, txid', _) <- alreadyInMempool ] <> fmap snd rejectedTxs + resolvedTxKeys = fmap fst acceptedTxs + rejectedForTrace = fmap snd rejectedTxs rejectedCount = length rejectedForTrace delta = end `diffTime` start @@ -346,17 +335,6 @@ txSubmissionInboundV2 | Set.member txid' accepted = (entry : acceptedTxs, rejectedTxs) | otherwise = (acceptedTxs, entry : rejectedTxs) - -- Partition buffered transactions by mempool presence. - partitionBufferedTxs :: (txid -> Bool) - -> [(TxKey, txid, tx)] - -> ([(TxKey, txid, tx)], [(TxKey, txid, tx)]) - partitionBufferedTxs mempoolHasTx = - foldr step ([], []) - where - step entry@(_, txid', _) (alreadyInMempool, pendingSubmit) - | mempoolHasTx txid' = (entry : alreadyInMempool, pendingSubmit) - | otherwise = (alreadyInMempool, entry : pendingSubmit) - -- Collect transactions with size mismatches between advertised and actual. 
collectWrongSizedTxs :: Map.Map txid SizeInBytes
                         -> Map.Map txid tx

diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs
index 0ea0b38b07d..46ab9e081c4 100644
--- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs
+++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs
@@ -120,9 +120,6 @@ data PeerTxAPI m txid tx = PeerTxAPI {
     resolveBufferedTxs  :: PeerTxLocalState tx
                         -> [TxKey]
                         -> m [(TxKey, txid, tx)],
-    -- | Mark the given tx keys as entering mempool submission phase in shared
-    -- state.
-    startSubmittingTxs  :: [TxKey] -> m (),
     -- | Add a delta to the V2 monotonic counters.
     addCounters         :: TxSubmissionCounters -> m ()
@@ -164,8 +161,6 @@ withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar
                  , applySubmittedTxs  = applySubmittedTxsImp policy sharedStateVar countersVar peeraddr
                  , resolveTxRequest   = resolveTxRequestImp sharedStateVar
                  , resolveBufferedTxs = resolveBufferedTxsImp sharedStateVar
-                 , startSubmittingTxs = atomically . modifyTVar sharedStateVar .
-                                        State.markSubmittingTxs peeraddr
                  , addCounters = \delta -> atomically $ modifyTVar countersVar (<> delta)
                  }
                )
diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs
index 73cc8879815..69ba2abde88 100644
--- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs
+++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs
@@ -190,13 +190,17 @@ applyPeerActionChoice ctx choice =
     applyDoNothingChoice ctx generation wakeDelay

 -- | Construct a 'PeerSubmitTxs' action for buffered transactions.
-applySubmitChoice :: PeerActionContext peeraddr txid tx
+--
+-- Marks the selected txs as 'TxSubmitting' on this peer in the shared state so
+-- other peers skip them via 'txSubmittingByOther'.
+applySubmitChoice :: Ord peeraddr + => PeerActionContext peeraddr txid tx -> [TxKey] -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) applySubmitChoice ctx txsToSubmit = ( PeerSubmitTxs txsToSubmit , pacPeerState ctx - , pacSharedState ctx + , markSubmittingTxs (pacPeerAddr ctx) txsToSubmit (pacSharedState ctx) ) -- | Construct a 'PeerRequestTxs' action and update local and shared tx state. diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index d64acdbe614..f8c250c056b 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -779,7 +779,9 @@ prop_nextPeerAction_prioritisesSubmit (Positive peeraddr) txid0 txSize0 = conjoin [ txKey === key , peerState' === peerState0 - , sharedState' === sharedState0 + -- Submit selection atomically marks the chosen tx as TxSubmitting + -- so concurrent peer decisions exclude it. + , sharedState' === markSubmittingTxs peeraddr [key] sharedState0 , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) , checkNoThunks "sharedState'" sharedState' ] @@ -1055,7 +1057,9 @@ prop_nextPeerAction_ownerSubmitsBuffered (Positive peeraddr) txid0 txSize0 = conjoin [ txKey === key , peerState' === peerState0 - , sharedState' === sharedState0 + -- Submit selection atomically marks the chosen tx as TxSubmitting + -- so concurrent peer decisions exclude it. + , sharedState' === markSubmittingTxs peeraddr [key] sharedState0 ] _ -> counterexample ("unexpected peer action: " ++ show peerAction) False where From a56b6eb43bca6a5cdc19b32a3060e826da46dbec Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Tue, 21 Apr 2026 14:11:24 +0200 Subject: [PATCH 34/67] fixup: central cleanup Move cleanup of expired retained TXs and orphaned TXs to the counter thread. 
--- .../TxSubmission/Inbound/V2/Registry.hs | 39 +++++++++++---- .../Network/TxSubmission/Inbound/V2/State.hs | 49 ++++++++++++------- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 5 +- 3 files changed, 65 insertions(+), 28 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index 46ab9e081c4..a027d2f4448 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -16,6 +16,7 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.Registry import Control.Concurrent.Class.MonadSTM qualified as Lazy import Control.Concurrent.Class.MonadSTM.Strict +import Control.Monad (when) import Control.Monad.Class.MonadThrow import Control.Monad.Class.MonadTime.SI import Control.Monad.Class.MonadTimer.SI @@ -53,22 +54,40 @@ newTxSubmissionCountersVar -> m (TxSubmissionCountersVar m) newTxSubmissionCountersVar = newTVarIO --- | Periodically emit the current V2 counters when they change. +-- | Central bookkeeping thread for V2. +-- +-- Wakes every @'bufferedTxsMinLifetime' policy \/ 4@ seconds to run +-- 'State.sweepSharedState' on the shared tx state (retention expiry + orphan +-- GC). On a slower cadence (every 'countersInterval' seconds of elapsed time) +-- it also emits the current counters when they differ from the last emission. txCountersThreadV2 - :: (MonadDelay m, MonadSTM m) - => Tracer m TxSubmissionCounters + :: forall m peeraddr txid. 
+ (MonadDelay m, MonadSTM m, HasRawTxId txid) + => TxDecisionPolicy + -> Tracer m TxSubmissionCounters -> TxSubmissionCountersVar m + -> SharedTxStateVar m peeraddr txid -> m Void -txCountersThreadV2 tracer countersVar = go mempty +txCountersThreadV2 policy tracer countersVar sharedStateVar = do + now <- getMonotonicTime + go mempty (addTime countersInterval now) where + sweepInterval :: DiffTime + sweepInterval = min 1 (bufferedTxsMinLifetime policy / 4) + + countersInterval :: DiffTime countersInterval = 7 - go !previous = do - threadDelay countersInterval - current <- readTVarIO countersVar - if current /= previous - then traceWith tracer current >> go current - else go previous + go !previous !nextEmitAt = do + threadDelay sweepInterval + now <- getMonotonicTime + atomically $ modifyTVar sharedStateVar (State.sweepSharedState now) + if now >= nextEmitAt + then do + current <- readTVarIO countersVar + when (current /= previous) $ traceWith tracer current + go current (addTime countersInterval now) + else go previous nextEmitAt -- | Peer-facing coordination API. -- diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 69ba2abde88..a2f504b7587 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -14,6 +14,7 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.State , removeAdvertisingPeersForResolvedTx , drainPeerScore , applyPeerRejections + , sweepSharedState ) where import Control.Monad.Class.MonadTime.SI (DiffTime, Time, addTime, diffTime) @@ -62,7 +63,7 @@ data PeerActionChoice peeraddr = -- | Build a precomputed context for selecting the next action for a peer. 
-- -- -mkPeerActionContext :: (Ord peeraddr, HasRawTxId txid) +mkPeerActionContext :: Ord peeraddr => Time -> TxDecisionPolicy -> peeraddr @@ -75,35 +76,22 @@ mkPeerActionContext now policy peeraddr peerState sharedState = pacPolicy = policy, pacPeerAddr = peeraddr, pacPeerState = peerState', - pacSharedState = sharedState', + pacSharedState = sharedState, pacSharedPeerState = sharedPeerState', pacClaimDelay = peerClaimDelay policy now (peerScore peerState') } where - -- Remove expired retained TX keys from all shared state tables. - -- When the retain timer expires, the peer gives up waiting for this txid - -- and will acknowledge it. We remove from all tables so the tx can be - -- re-advertised if needed. - sharedState' = - let expiredRetainedKeys = retainedExpiredKeys now (sharedRetainedTxs sharedState) - prunedSharedState = dropTxKeys expiredRetainedKeys sharedState in - if IntSet.null expiredRetainedKeys - then sharedState - else prunedSharedState { - sharedGeneration = sharedGeneration sharedState + 1 - } - -- Remove downloaded tx bodies that are no longer in the shared state. peerState' = let downloaded = peerDownloadedTxs peerState in if IntMap.null downloaded then peerState else peerState { - peerDownloadedTxs = IntMap.intersection downloaded (sharedTxTable sharedState') + peerDownloadedTxs = IntMap.intersection downloaded (sharedTxTable sharedState) } sharedPeerState' = - case Map.lookup peeraddr (sharedPeers sharedState') of + case Map.lookup peeraddr (sharedPeers sharedState) of Just sharedPeerState -> sharedPeerState Nothing -> error "TxSubmission.V2.mkPeerActionContext: missing peer" @@ -738,6 +726,33 @@ dropDeadActiveKeys keys st@SharedTxState { sharedTxTable } = Just txEntry -> not (activeTxLive txEntry) Nothing -> False +-- | Shared-state cleanup +-- +-- Drops two kinds of dead entries in one pass: +-- +-- * Retained entries whose retention deadline has passed. +-- * Orphaned 'sharedTxTable' entries. 
+-- +-- Bumps 'sharedGeneration' if anything changed so sleeping peer workers wake +-- and re-evaluate. +sweepSharedState :: HasRawTxId txid + => Time + -> SharedTxState peeraddr txid + -> SharedTxState peeraddr txid +sweepSharedState now st + | IntSet.null toDrop = st + | otherwise = (dropTxKeys toDrop st) { + sharedGeneration = sharedGeneration st + 1 + } + where + expiredRetained = retainedExpiredKeys now (sharedRetainedTxs st) + orphans = IntMap.keysSet (IntMap.filter isOrphan (sharedTxTable st)) + toDrop = expiredRetained `IntSet.union` orphans + + isOrphan TxEntry { txLease = TxLeased {} } = False + isOrphan TxEntry { txAdvertiserCount, txAttempts } = + txAdvertiserCount == 0 && Map.null txAttempts + -- | Is the TX entry alive? -- -- A TX entry is alive if there is a lease, there are advertisers for it or there are diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index f8c250c056b..ac41d27895a 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -1571,7 +1571,10 @@ prop_nextPeerAction_prunesExpiredRetained (Positive peeraddr) txid0 _txSize0 = , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = max 1 (k + 1) } - (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr idlePeerState sharedState0 + -- The central counters thread sweeps expired retained entries; emulate + -- that by calling the same helper before evaluating the peer decision. + sweptState = sweepSharedState now sharedState0 + (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr idlePeerState sweptState -- Verifies that nextPeerAction keeps unexpired retained txs and returns the -- wake delay until their expiry. 
From 32e12a97e1bee7a5aac93a42b689ab6485a9e199 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Wed, 22 Apr 2026 08:18:05 +0200 Subject: [PATCH 35/67] fixup: bump default limits Bump maxNumTxIdsToRequest to 6 and maxOutstandingTxBatchesPerPeer to 4. --- .../lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs | 4 ++-- .../tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs index e52ed3ee9da..24bbf00a529 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs @@ -72,10 +72,10 @@ instance NFData TxDecisionPolicy where defaultTxDecisionPolicy :: TxDecisionPolicy defaultTxDecisionPolicy = TxDecisionPolicy { - maxNumTxIdsToRequest = 3, + maxNumTxIdsToRequest = 6, maxUnacknowledgedTxIds = 10, -- must be the same as txSubmissionMaxUnacked txsSizeInflightPerPeer = max_TX_SIZE * 6, - maxOutstandingTxBatchesPerPeer = 2, + maxOutstandingTxBatchesPerPeer = 4, txInflightMultiplicity = 2, bufferedTxsMinLifetime = 2, scoreRate = 0.1, diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index ac41d27895a..78398bb3b03 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -1537,7 +1537,8 @@ prop_nextPeerActionPipelined_noThirdBodyBatch (Positive peeraddr) txidA0 txidB0 , sharedKeyToTxId = IntMap.fromList [(kA, txidA), (kB, txidB), (kC, txidC)] , sharedNextTxKey = 3 } - (peerAction, peerState', sharedState') = nextPeerActionPipelined now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + (peerAction, peerState', 
sharedState') = nextPeerActionPipelined now policy peeraddr peerState0 sharedState0
+    policy = defaultTxDecisionPolicy { maxOutstandingTxBatchesPerPeer = 2 }

 -- Verifies that nextPeerAction prunes expired retained txs and removes their
 -- tx-key mappings while the peer is idle.

From 670d07315a2132ca1c1207be66ec9906e22ff07d Mon Sep 17 00:00:00 2001
From: Karl Knutsson
Date: Wed, 22 Apr 2026 11:01:27 +0200
Subject: [PATCH 36/67] fixup: improve handleReceivedTxIds property testing

Merge prop_handleReceivedTxIds_newEntries,
prop_handleReceivedTxIds_knownToMempool and
prop_handleReceivedTxIds_retainedIsLocalOnly into a single
property-based test that exercises all cases.
---
 .../Ouroboros/Network/TxSubmission/TxLogic.hs | 664 ++++++++++++++----
 1 file changed, 544 insertions(+), 120 deletions(-)

diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs
index 78398bb3b03..a49bfa72d8d 100644
--- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs
+++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs
@@ -20,6 +20,8 @@ module Test.Ouroboros.Network.TxSubmission.TxLogic
   , runPeerActionLoop
   , runFanoutLoop
   , sharedTxStateInvariant
+  , peerTxLocalStateInvariant
+  , combinedStateInvariant
   , InvariantStrength (..)
) where @@ -58,9 +60,7 @@ import Test.Tasty.QuickCheck (testProperty) tests :: TestTree tests = testGroup "TxLogic" - [ testProperty "handleReceivedTxIds inserts new tx entries" prop_handleReceivedTxIds_newEntries - , testProperty "handleReceivedTxIds resolves txids already in mempool" prop_handleReceivedTxIds_knownToMempool - , testProperty "handleReceivedTxIds keeps retained txids local-only" prop_handleReceivedTxIds_retainedIsLocalOnly + [ testProperty "handleReceivedTxIds handles mixed new / retained / mempool txids" prop_handleReceivedTxIds , testCaseSteps "handleReceivedTxIds adds the current peer as an advertiser for active txs" unit_handleReceivedTxIds_addsAdvertiserForActiveTxs , testCaseSteps "nextPeerAction lets another peer claim a fresh tx when the first advertiser is full" unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull , testProperty "handleReceivedTxs buffers received and drops omitted txs" prop_handleReceivedTxs_buffersAndDropsOmitted @@ -156,6 +156,98 @@ data InvariantStrength = WeakInvariant | StrongInvariant deriving (Eq, Show) +-- | 'PeerTxLocalState' invariant. +-- +-- Consistency constraints between the peer-local bookkeeping maps: +-- +-- * Every available or downloaded key is still tracked in the +-- unacknowledged queue. +-- * Every requested key is also available (a key can only be requested +-- while its advertised size is still known). +-- * Downloaded keys are disjoint from available and from requested +-- (a key moves out of available/requested exactly when its body +-- lands in downloaded). +-- * @peerRequestedTxs@ equals the union of all @requestedTxBatchSet@s +-- and @peerRequestedTxsSize@ equals the sum of all batch sizes. +peerTxLocalStateInvariant + :: forall tx. 
+ PeerTxLocalState tx + -> Property +peerTxLocalStateInvariant PeerTxLocalState { + peerUnacknowledgedTxIds, + peerAvailableTxIds, + peerRequestedTxs, + peerRequestedTxBatches, + peerRequestedTxsSize, + peerDownloadedTxs + } = + conjoin + [ counterexample "requested keys are not all available" + (property (peerRequestedTxs `IntSet.isSubsetOf` availableKeys)) + , counterexample "available keys are not all in the unacknowledged queue" + (property (availableKeys `IntSet.isSubsetOf` unackKeys)) + , counterexample "downloaded keys are not all in the unacknowledged queue" + (property (downloadedKeys `IntSet.isSubsetOf` unackKeys)) + , counterexample "downloaded and available key sets overlap" + (IntSet.null (downloadedKeys `IntSet.intersection` availableKeys)) + , counterexample "downloaded and requested key sets overlap" + (IntSet.null (downloadedKeys `IntSet.intersection` peerRequestedTxs)) + , counterexample "peerRequestedTxs does not match the batch key-set union" + (peerRequestedTxs === batchKeyUnion) + , counterexample "peerRequestedTxsSize does not match the sum of batch sizes" + (peerRequestedTxsSize === batchSizeSum) + ] + where + unackKeys = IntSet.fromList [ k | TxKey k <- toList peerUnacknowledgedTxIds ] + availableKeys = IntMap.keysSet peerAvailableTxIds + downloadedKeys = IntMap.keysSet peerDownloadedTxs + batchKeyUnion = + IntSet.unions (fmap requestedTxBatchSet (toList peerRequestedTxBatches)) + batchSizeSum = + sum (fmap requestedTxBatchSize (toList peerRequestedTxBatches)) + +-- | Combined 'SharedTxState' / 'PeerTxLocalState' invariant. +-- +-- Runs the individual invariants on each piece and adds the cross-state +-- coherence constraints: +-- +-- * The peer's 'sharedPeerAdvertisedTxKeys' (as recorded in the shared +-- state) are a subset of the peer's local unacknowledged queue — a peer +-- can only advertise keys it has actually received. 
+-- * Those advertised keys must have a matching entry in 'sharedTxTable': +-- an advertisement without an active tx entry is an orphan and would +-- leave 'txAdvertiserCount' out of sync with the peer key sets. +combinedStateInvariant + :: forall peeraddr txid tx. + ( Ord peeraddr + , Ord txid + , HasRawTxId txid + , Show peeraddr + , Show txid + ) + => InvariantStrength + -> peeraddr + -> PeerTxLocalState tx + -> SharedTxState peeraddr txid + -> Property +combinedStateInvariant strength peeraddr peerState sharedState = + conjoin + [ peerTxLocalStateInvariant peerState + , sharedTxStateInvariant strength sharedState + , counterexample "advertised keys escape the peer's unacknowledged queue" + (property (advertisedKeys `IntSet.isSubsetOf` unackKeys)) + , counterexample "advertised keys have no matching sharedTxTable entry" + (property (advertisedKeys `IntSet.isSubsetOf` IntMap.keysSet (sharedTxTable sharedState))) + ] + where + unackKeys = + IntSet.fromList [ k | TxKey k <- toList (peerUnacknowledgedTxIds peerState) ] + advertisedKeys = + maybe IntSet.empty sharedPeerAdvertisedTxKeys + (Map.lookup peeraddr (sharedPeers sharedState)) + + + -- | 'InboundState` invariant. -- sharedTxStateInvariant @@ -251,6 +343,36 @@ newtype ArbSharedPeerState = ArbSharedPeerState SharedPeerState newtype ArbPeerTxLocalState = ArbPeerTxLocalState (PeerTxLocalState (Tx TxId)) deriving Show +-- | Tag classifying how @handleReceivedTxIds@ should resolve each incoming +-- txid. +-- +-- * 'TxIdNew' — fresh txid, hits the new-entry branch. +-- * 'TxIdRetained' — pre-seeded into 'sharedRetainedTxs', hits the retained +-- branch. +-- * 'TxIdMempool' — mempool-known and not previously in 'sharedTxTable', +-- hits the mempool branch with a @Nothing@ lookup. 
+-- * 'TxIdMempoolResolvesActive' — mempool-known and pre-seeded into +-- 'sharedTxTable' as advertised by some other peer, hits the mempool +-- branch with a @Just@ lookup and therefore triggers +-- 'removeAdvertisingPeersForResolvedTxExcept' + peer wake-up. Production +-- only hits this when a tx reached the mempool via the local +-- tx-submission interface after remote peers had already advertised the +-- txid, so it is deliberately rare in the generator. +data TxIdGroupTag + = TxIdNew + | TxIdRetained + | TxIdMempool + | TxIdMempoolResolvesActive + deriving (Eq, Ord, Show) + +instance Arbitrary TxIdGroupTag where + arbitrary = frequency + [ (12, pure TxIdNew) + , (4, pure TxIdRetained) + , (4, pure TxIdMempool) + , (1, pure TxIdMempoolResolvesActive) + ] + instance Arbitrary ArbTxDecisionPolicy where arbitrary = ArbTxDecisionPolicy <$> ( @@ -318,123 +440,329 @@ instance Arbitrary ArbSharedTxState where | otherwise = ArbSharedTxState <$> shrinkSharedTxState sharedState --- Verifies that handleReceivedTxIds interns new txids, adds claimable entries --- for them, and preserves unrelated shared-state entries. -prop_handleReceivedTxIds_newEntries - :: Positive Int - -> ArbSharedTxState - -> NonEmptyList (TxId, Positive Int) +-- Verifies that handleReceivedTxIds resolves each incoming txid according to +-- its state: +-- +-- * 'TxIdNew' — new claimable entry for @peeraddr@ in sharedTxTable. +-- * 'TxIdRetained' — queued locally only; shared state unchanged. +-- * 'TxIdMempool' — interned and added to sharedRetainedTxs. +-- * 'TxIdMempoolResolvesActive' — active entry (advertised by another +-- peer) is removed from sharedTxTable, moved to sharedRetainedTxs, the +-- other peer's advertising for the key is cleared, and (if idle) its +-- generation is bumped. +-- +-- Also asserts the peer's pre-existing state and unrelated shared state are +-- preserved, and that the combined invariant holds before and after. 
+prop_handleReceivedTxIds + :: ArbSharedTxState + -> ArbPeerTxLocalState + -> NonEmptyList (TxId, Positive Int, TxIdGroupTag) -> Positive Int -> Property -prop_handleReceivedTxIds_newEntries (Positive peeraddr) (ArbSharedTxState sharedState0) (NonEmpty txids0) (Positive extraRequested) = - conjoin - [ peerRequestedTxIds peerState' === fromIntegral extraRequested - , StrictSeq.length (peerUnacknowledgedTxIds peerState') === length txidsAndSizes - , toList (peerUnacknowledgedTxIds peerState') === fmap (\(txid, _) -> lookupKeyOrFail txid sharedState') txidsAndSizes - , IntMap.size (peerAvailableTxIds peerState') === length txidsAndSizes - , Map.delete peeraddr (sharedPeers sharedState') === Map.delete peeraddr (sharedPeers sharedStateBase) - , sharedPeerAdvertisedTxKeys (lookupPeerOrFail peeraddr sharedState') === expectedAdvertisedKeys - , IntMap.restrictKeys (sharedTxTable sharedState') oldKeys === sharedTxTable sharedStateBase - , retainedRestrictKeys (sharedRetainedTxs sharedState') oldKeys === sharedRetainedTxs sharedStateBase - , IntMap.restrictKeys (sharedKeyToTxId sharedState') oldKeys === sharedKeyToTxId sharedStateBase - , sharedGeneration sharedState' === sharedGeneration sharedStateBase + 1 - , sharedNextTxKey sharedState' === sharedNextTxKey sharedStateBase + length txidsAndSizes - , conjoin (fmap checkExistingTxId (Map.toList (sharedTxIdToKey sharedStateBase))) - , conjoin (fmap checkEntry txidsAndSizes) - , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "sharedState'" sharedState' - ] - where - sharedStateBase = ensurePeerAdvertisesTxKeys peeraddr [] sharedState0 - txidsAndSizes = - freshBatchAgainstSharedState sharedStateBase $ - dedupeBatch [ (abs txid + 1, mkSize txSize) | (txid, txSize) <- txids0 ] - oldKeys = IntMap.keysSet (sharedKeyToTxId sharedStateBase) - requestedToReply = fromIntegral (length txidsAndSizes) - peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = requestedToReply + fromIntegral 
extraRequested } - (peerState', sharedState') = - handleReceivedTxIds (const False) now defaultTxDecisionPolicy peeraddr requestedToReply txidsAndSizes peerState0 sharedStateBase - expectedAdvertisedKeys = - sharedPeerAdvertisedTxKeys (lookupPeerOrFail peeraddr sharedStateBase) - `IntSet.union` - IntSet.fromList [ unTxKey (lookupKeyOrFail txid sharedState') | (txid, _) <- txidsAndSizes ] - - checkExistingTxId (rawId, txKey) = - Map.lookup rawId (sharedTxIdToKey sharedState') === Just txKey - - checkEntry (txid, _) = - case IntMap.lookup (unTxKey (lookupKeyOrFail txid sharedState')) (sharedTxTable sharedState') of - Nothing -> counterexample ("missing tx entry for " ++ show txid) False - Just TxEntry { txLease, txAdvertiserCount, txAttempts } -> - conjoin - [ txLease === TxClaimable now - , txAdvertiserCount === 1 - , txAttempts === Map.empty - ] +prop_handleReceivedTxIds + (ArbSharedTxState sharedState0) + (ArbPeerTxLocalState peerStateGenerated) + (NonEmpty taggedInput) + (Positive extraRequested) = + forAll (genPeerAddrBiased sharedState0) $ \peeraddr -> + let sharedStateWithPeer = ensurePeerAdvertisesTxKeys peeraddr [] sharedState0 + + -- Canonicalize input: normalise each (txid, size), then dedupe by txid + -- while preserving the first-seen tag. + dedupedTagged :: [((TxId, SizeInBytes), TxIdGroupTag)] + dedupedTagged = + nubBy ((==) `on` (fst . fst)) + [ ((abs txid + 1, mkSize txSize), tag) + | (txid, txSize, tag) <- taggedInput + ] --- Verifies that handleReceivedTxIds retains txids already known to the --- mempool instead of leaving active tx entries behind. 
-prop_handleReceivedTxIds_knownToMempool - :: Positive Int - -> TxId - -> Positive Int - -> Property -prop_handleReceivedTxIds_knownToMempool (Positive peeraddr) txid0 txSize0 = - conjoin - [ peerAvailableTxIds peerState' === IntMap.empty - , toList (peerUnacknowledgedTxIds peerState') === [key] - , IntMap.lookup (unTxKey key) (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) - , retainedLookup (unTxKey key) (sharedRetainedTxs sharedState') === Just expectedRetainUntil - , Map.lookup (getRawTxId txid) (sharedTxIdToKey sharedState') === Just key - , IntMap.lookup (unTxKey key) (sharedKeyToTxId sharedState') === Just txid - , sharedGeneration sharedState' === 1 - , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "sharedState'" sharedState' - ] - where - txid = abs txid0 + 1 - txSize = mkSize txSize0 - requestedToReply = 1 - peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = requestedToReply } - sharedState0 = ensurePeerAdvertisesTxKeys peeraddr [] emptySharedTxState - (peerState', sharedState') = - handleReceivedTxIds (== txid) now defaultTxDecisionPolicy peeraddr requestedToReply [(txid, txSize)] peerState0 sharedState0 - key = lookupKeyOrFail txid sharedState' - expectedRetainUntil = addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now + -- Shift all txids forward so they are disjoint from sharedStateWithPeer's + -- intern table and from each other. Preserves input order and tag mapping. 
+ freshenedTxids :: [(TxId, SizeInBytes)] + freshenedTxids = + freshBatchAgainstSharedState sharedStateWithPeer (fmap fst dedupedTagged) + + taggedFreshened :: [((TxId, SizeInBytes), TxIdGroupTag)] + taggedFreshened = zip freshenedTxids (fmap snd dedupedTagged) + + (newGroup, retainedGroup, mempoolFreshGroup, resolveActiveCandidates) = + foldr partitionByTag ([], [], [], []) taggedFreshened + where + partitionByTag (e, TxIdNew) (n, r, m, a) = (e:n, r, m, a) + partitionByTag (e, TxIdRetained) (n, r, m, a) = (n, e:r, m, a) + partitionByTag (e, TxIdMempool) (n, r, m, a) = (n, r, e:m, a) + partitionByTag (e, TxIdMempoolResolvesActive) (n, r, m, a) = (n, r, m, e:a) + + -- Seed the retained group into the shared state first: intern the txids + -- and add them to sharedRetainedTxs. + sharedStateWithRetained = seedRetainedTxids retainedGroup sharedStateWithPeer + + -- Pick an advertiser peer for the resolve-active sub-group, if any peer + -- other than @peeraddr@ exists. If none is available, demote the + -- resolve-active candidates to fresh mempool entries so they still + -- exercise the mempool branch. + otherPeerOpt :: Maybe PeerAddr + otherPeerOpt = + case filter (/= peeraddr) + (Map.keys (sharedPeers sharedStateWithRetained)) of + [] -> Nothing + (p:_) -> Just p + + (mempoolResolveActiveGroup, mempoolGroup, sharedStateBase) = + case otherPeerOpt of + Just p | not (null resolveActiveCandidates) -> + ( resolveActiveCandidates + , mempoolFreshGroup + , seedActiveTxidsForOtherPeer p resolveActiveCandidates + sharedStateWithRetained + ) + _ -> + ( [] + , mempoolFreshGroup ++ resolveActiveCandidates + , sharedStateWithRetained + ) --- Verifies that txids already retained in shared state only update the peer's --- local queue and do not dirty the shared state again. 
-prop_handleReceivedTxIds_retainedIsLocalOnly - :: Positive Int - -> TxId - -> Positive Int - -> Property -prop_handleReceivedTxIds_retainedIsLocalOnly (Positive peeraddr) txid0 txSize0 = - conjoin - [ peerRequestedTxIds peerState' === 0 - , peerAvailableTxIds peerState' === IntMap.empty - , toList (peerUnacknowledgedTxIds peerState') === [key] - , sharedState' === sharedState0 - , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "sharedState'" sharedState' - ] - where - txid = abs txid0 + 1 - txSize = mkSize txSize0 - key = TxKey 0 - k = unTxKey key - retainUntil = addTime 17 now - peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = 1 } - sharedState0 = ensurePeerAdvertisesTxKeys peeraddr [] $ - emptySharedTxState - { sharedRetainedTxs = retainedSingleton k retainUntil - , sharedTxIdToKey = Map.singleton (getRawTxId txid) key - , sharedKeyToTxId = IntMap.singleton k txid - , sharedNextTxKey = 1 - , sharedGeneration = 7 - } - (peerState', sharedState') = - handleReceivedTxIds (const False) now defaultTxDecisionPolicy peeraddr 1 [(txid, txSize)] peerState0 sharedState0 + -- The full input list, in original (interleaved) order. + txidsAndSizes :: [(TxId, SizeInBytes)] + txidsAndSizes = freshenedTxids + + mempoolTxidSet :: Set.Set TxId + mempoolTxidSet = + Set.fromList + (fmap fst mempoolGroup ++ fmap fst mempoolResolveActiveGroup) + mempoolHasTx :: TxId -> Bool + mempoolHasTx = (`Set.member` mempoolTxidSet) + + oldKeys = IntMap.keysSet (sharedKeyToTxId sharedStateBase) + -- Keys that are in sharedStateBase's intern table AND whose sharedTxTable + -- entry is about to be removed by the mempool branch. We exclude them + -- from the "unchanged at old keys" assertions; their behaviour is + -- covered explicitly by checkMempoolResolveActiveEntry. 
+ resolveActiveKeySet :: IntSet.IntSet + resolveActiveKeySet = + IntSet.fromList + [ unTxKey (lookupKeyOrFail txid sharedStateBase) + | (txid, _) <- mempoolResolveActiveGroup + ] + stableOldKeys = oldKeys `IntSet.difference` resolveActiveKeySet + requestedToReply = fromIntegral (length txidsAndSizes) + -- Shift generated peer-local keys past everything that + -- handleReceivedTxIds touches, so the pre-existing peer-local keys stay + -- disjoint from both the base state and the newly-allocated keys. + peerKeyShift = sharedNextTxKey sharedStateBase + length txidsAndSizes + preExistingAdvertised = + sharedPeerAdvertisedTxKeys (lookupPeerOrFail peeraddr sharedStateBase) + advertisedPrefix = + StrictSeq.fromList [ TxKey k | k <- IntSet.toList preExistingAdvertised ] + peerStateShifted = shiftPeerTxLocalStateKeys peerKeyShift peerStateGenerated + peerState0 = + peerStateShifted { + peerUnacknowledgedTxIds = + advertisedPrefix <> peerUnacknowledgedTxIds peerStateShifted, + peerRequestedTxIds = requestedToReply + fromIntegral extraRequested + } + oldPeerAvailableKeys = IntMap.keysSet (peerAvailableTxIds peerState0) + (peerState', sharedState') = + handleReceivedTxIds mempoolHasTx now defaultTxDecisionPolicy peeraddr requestedToReply txidsAndSizes peerState0 sharedStateBase + + expectedRetainUntil = + addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now + + -- Only the new group extends the peer's advertised-key set. + expectedAdvertisedKeys = + preExistingAdvertised + `IntSet.union` + IntSet.fromList + [ unTxKey (lookupKeyOrFail txid sharedState') + | (txid, _) <- newGroup + ] + + -- Keys newly interned during the call (new + mempool-fresh). Retained + -- and resolve-active keys were pre-interned by the seed helpers. + expectedNextTxKeyAdvance = length newGroup + length mempoolGroup + + -- The generation bumps iff the call actually changed shared state. Pure + -- retained-only input leaves shared state untouched. 
+ expectedGenerationAdvance :: Word64 + expectedGenerationAdvance + | null newGroup && null mempoolGroup && null mempoolResolveActiveGroup = 0 + | otherwise = 1 + + -- Peers whose entry may differ between sharedStateBase and sharedState': + -- always @peeraddr@, and also the chosen advertiser if the mempool + -- branch resolved any active entry. + affectedPeers :: Set.Set PeerAddr + affectedPeers = + Set.insert peeraddr $ + case otherPeerOpt of + Just p | not (null mempoolResolveActiveGroup) -> Set.singleton p + _ -> Set.empty + + checkNewEntry (txid, size) = + let k = unTxKey (lookupKeyOrFail txid sharedState') in + case IntMap.lookup k (sharedTxTable sharedState') of + Nothing -> + counterexample ("missing new tx entry for " ++ show txid) (property False) + Just TxEntry { txLease, txAdvertiserCount, txAttempts } -> + conjoin + [ txLease === TxClaimable now + , txAdvertiserCount === 1 + , txAttempts === (Map.empty :: Map.Map PeerAddr TxAttemptState) + , counterexample "new txid missing from peerAvailableTxIds" + (IntMap.lookup k (peerAvailableTxIds peerState') === Just size) + , counterexample "new txid missing from peer advertised keys" + (property (IntSet.member k + (sharedPeerAdvertisedTxKeys + (lookupPeerOrFail peeraddr sharedState')))) + ] + + checkRetainedEntry (txid, _) = + let k = unTxKey (lookupKeyOrFail txid sharedState') in + conjoin + [ counterexample "retained txid appears in sharedTxTable" + (property (IntMap.notMember k (sharedTxTable sharedState'))) + , counterexample "retained txid disappeared from sharedRetainedTxs" + (property (retainedMember k (sharedRetainedTxs sharedState'))) + , counterexample "retained txid leaked into peerAvailableTxIds" + (property (IntMap.notMember k (peerAvailableTxIds peerState'))) + , counterexample "retained txid leaked into peer advertised keys" + (property (IntSet.notMember k + (sharedPeerAdvertisedTxKeys + (lookupPeerOrFail peeraddr sharedState')))) + ] + + checkMempoolEntry (txid, _) = + let k = unTxKey 
(lookupKeyOrFail txid sharedState') in + conjoin + [ counterexample "mempool txid leaked into sharedTxTable" + (property (IntMap.notMember k (sharedTxTable sharedState'))) + , counterexample "mempool txid missing or wrong retain-until in sharedRetainedTxs" + (retainedLookup k (sharedRetainedTxs sharedState') + === Just expectedRetainUntil) + , counterexample "mempool txid leaked into peerAvailableTxIds" + (property (IntMap.notMember k (peerAvailableTxIds peerState'))) + , counterexample "mempool txid leaked into peer advertised keys" + (property (IntSet.notMember k + (sharedPeerAdvertisedTxKeys + (lookupPeerOrFail peeraddr sharedState')))) + ] + + -- A resolve-active entry was in sharedTxTable (advertised by an "other" + -- peer) before the call. The mempool branch deletes it from + -- sharedTxTable, inserts it into sharedRetainedTxs, and clears the + -- "other" peer's advertising for that key. peeraddr never advertised + -- the key (keys are freshened out of peeraddr's advertised range). 
+ checkMempoolResolveActiveEntry (txid, _) = + let k = unTxKey (lookupKeyOrFail txid sharedState') in + conjoin + [ counterexample "resolve-active txid remained in sharedTxTable" + (property (IntMap.notMember k (sharedTxTable sharedState'))) + , counterexample "resolve-active txid missing or wrong retain-until" + (retainedLookup k (sharedRetainedTxs sharedState') + === Just expectedRetainUntil) + , counterexample "resolve-active txid leaked into peerAvailableTxIds" + (property (IntMap.notMember k (peerAvailableTxIds peerState'))) + , counterexample "resolve-active txid leaked into peer advertised keys" + (property (IntSet.notMember k + (sharedPeerAdvertisedTxKeys + (lookupPeerOrFail peeraddr sharedState')))) + , case otherPeerOpt of + Nothing -> property True + Just op -> + counterexample "other advertiser still lists resolve-active key" + (property (IntSet.notMember k + (sharedPeerAdvertisedTxKeys + (lookupPeerOrFail op sharedState')))) + ] + + -- If the resolve-active group is non-empty, the chosen @otherPeer@'s + -- post-call state should have the resolve-active keys stripped from its + -- advertising (reverting to what it advertised in sharedStateWithRetained) + -- and its generation bumped by 1 iff its phase is PeerIdle. 
+ checkOtherPeerState = + case otherPeerOpt of + Just op | not (null mempoolResolveActiveGroup) -> + let original = lookupPeerOrFail op sharedStateWithRetained + post = lookupPeerOrFail op sharedState' + bumpIfIdle g + | sharedPeerPhase original == PeerIdle = g + 1 + | otherwise = g + in conjoin + [ counterexample "other peer's advertised keys not restored" + (sharedPeerAdvertisedTxKeys post + === sharedPeerAdvertisedTxKeys original) + , counterexample "other peer's phase changed unexpectedly" + (sharedPeerPhase post === sharedPeerPhase original) + , counterexample "other peer's generation bump mismatch" + (sharedPeerGeneration post + === bumpIfIdle (sharedPeerGeneration original)) + ] + _ -> property True + + checkExistingTxId (rawId, txKey) = + Map.lookup rawId (sharedTxIdToKey sharedState') === Just txKey in + classify (StrictSeq.null (peerUnacknowledgedTxIds peerStateGenerated)) + "generated peer-local state: empty unacknowledged queue" $ + classify (not (Map.member peeraddr (sharedPeers sharedState0))) + "peeraddr: fresh (not in generated sharedState)" $ + classify (not (IntSet.null preExistingAdvertised)) + "peeraddr: has pre-existing advertised keys" $ + classify (length txidsAndSizes /= length taggedInput) + "received txids: reduced by dedupe or fresh-shift" $ + classify (not (null newGroup)) "txids include new" $ + classify (not (null retainedGroup)) "txids include retained" $ + classify (not (null mempoolGroup)) "txids include mempool" $ + classify (not (null mempoolResolveActiveGroup)) "txids include resolve-active" $ + tabulate "received txids" [bucket (length txidsAndSizes)] $ + tabulate "new group" [bucket (length newGroup)] $ + tabulate "retained group" [bucket (length retainedGroup)] $ + tabulate "mempool group" [bucket (length mempoolGroup)] $ + tabulate "resolve-active group" [bucket (length mempoolResolveActiveGroup)] $ + tabulate "sharedState peers" [bucket (Map.size (sharedPeers sharedStateBase))] $ + tabulate "active txs" [bucket (IntMap.size 
(sharedTxTable sharedStateBase))] $ + tabulate "retained txs" [bucket (retainedSize (sharedRetainedTxs sharedStateBase))] $ + conjoin + [ peerRequestedTxIds peerState' === fromIntegral extraRequested + , toList (peerUnacknowledgedTxIds peerState') + === toList (peerUnacknowledgedTxIds peerState0) + ++ fmap (\(txid, _) -> lookupKeyOrFail txid sharedState') txidsAndSizes + , IntMap.size (peerAvailableTxIds peerState') + === IntMap.size (peerAvailableTxIds peerState0) + length newGroup + , IntMap.restrictKeys (peerAvailableTxIds peerState') oldPeerAvailableKeys + === peerAvailableTxIds peerState0 + , peerRequestedTxs peerState' === peerRequestedTxs peerState0 + , peerRequestedTxBatches peerState' === peerRequestedTxBatches peerState0 + , peerRequestedTxsSize peerState' === peerRequestedTxsSize peerState0 + , peerDownloadedTxs peerState' === peerDownloadedTxs peerState0 + , peerDownloadStartTime peerState' === peerDownloadStartTime peerState0 + , peerScore peerState' === peerScore peerState0 + , Map.withoutKeys (sharedPeers sharedState') affectedPeers + === Map.withoutKeys (sharedPeers sharedStateBase) affectedPeers + , sharedPeerAdvertisedTxKeys (lookupPeerOrFail peeraddr sharedState') + === expectedAdvertisedKeys + , IntMap.restrictKeys (sharedTxTable sharedState') stableOldKeys + === IntMap.restrictKeys (sharedTxTable sharedStateBase) stableOldKeys + , retainedRestrictKeys (sharedRetainedTxs sharedState') stableOldKeys + === retainedRestrictKeys (sharedRetainedTxs sharedStateBase) stableOldKeys + , IntMap.restrictKeys (sharedKeyToTxId sharedState') oldKeys + === sharedKeyToTxId sharedStateBase + , sharedGeneration sharedState' + === sharedGeneration sharedStateBase + expectedGenerationAdvance + , sharedNextTxKey sharedState' + === sharedNextTxKey sharedStateBase + expectedNextTxKeyAdvance + , conjoin (fmap checkExistingTxId (Map.toList (sharedTxIdToKey sharedStateBase))) + , conjoin (fmap checkNewEntry newGroup) + , conjoin (fmap checkRetainedEntry retainedGroup) + 
, conjoin (fmap checkMempoolEntry mempoolGroup) + , conjoin (fmap checkMempoolResolveActiveEntry mempoolResolveActiveGroup) + , checkOtherPeerState + , counterexample "combined invariant violated before the call" + (combinedStateInvariant StrongInvariant peeraddr peerState0 sharedStateBase) + , counterexample "combined invariant violated after the call" + (combinedStateInvariant StrongInvariant peeraddr peerState' sharedState') + , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "sharedState'" sharedState' + ] unit_handleReceivedTxIds_addsAdvertiserForActiveTxs :: (String -> IO ()) -> Assertion unit_handleReceivedTxIds_addsAdvertiserForActiveTxs step = do @@ -2277,6 +2605,21 @@ genDistinctPositiveInts count | count <= 0 = pure [] | otherwise = take count <$> shuffle [1 .. max count (count * 4 + 5)] +-- Pick a peer address biased toward existing peers in the shared state, so +-- the generator frequently exercises the "peeraddr already known" code +-- paths. Falls back to a fresh small-range address when the shared state +-- has no peers. +genPeerAddrBiased :: SharedTxState PeerAddr TxId -> Gen PeerAddr +genPeerAddrBiased sharedState = + case Map.keys (sharedPeers sharedState) of + [] -> genFresh + peers -> frequency + [ (3, elements peers) + , (1, genFresh) + ] + where + genFresh = chooseInt (1, 64) + -- Generate expiry times near the shared test reference time. genSharedExpiryTime :: Gen Time genSharedExpiryTime = @@ -2309,6 +2652,17 @@ now = Time 100 mkSize :: Positive Int -> SizeInBytes mkSize (Positive n) = fromIntegral ((n `mod` 65535) + 1) +-- Render a count into a coarse bucket label for QuickCheck's 'tabulate'. +bucket :: Int -> String +bucket n + | n <= 0 = "0" + | n == 1 = "1" + | n <= 5 = "2-5" + | n <= 10 = "6-10" + | n <= 25 = "11-25" + | n <= 100 = "26-100" + | otherwise = "100+" + -- Build a valid test transaction with matching body and advertised size. 
mkTx :: TxId -> SizeInBytes -> Tx TxId mkTx txid txSize = Tx @@ -2356,6 +2710,28 @@ ensurePeerAdvertisesTxKeys peeraddr txKeys st@SharedTxState { sharedPeers } = sharedPeerAdvertisedTxKeys sharedPeerState `IntSet.union` advertisedKeys }) +-- Shift every TxKey referenced by a peer-local state by a constant offset so +-- the state can be composed with a foreign SharedTxState without key +-- collisions. +shiftPeerTxLocalStateKeys :: Int -> PeerTxLocalState tx -> PeerTxLocalState tx +shiftPeerTxLocalStateKeys offset peerState = peerState { + peerUnacknowledgedTxIds = + fmap shiftTxKey (peerUnacknowledgedTxIds peerState), + peerAvailableTxIds = + IntMap.mapKeysMonotonic (+ offset) (peerAvailableTxIds peerState), + peerRequestedTxs = + IntSet.map (+ offset) (peerRequestedTxs peerState), + peerRequestedTxBatches = + fmap shiftBatch (peerRequestedTxBatches peerState), + peerDownloadedTxs = + IntMap.mapKeysMonotonic (+ offset) (peerDownloadedTxs peerState) + } + where + shiftTxKey (TxKey k) = TxKey (k + offset) + shiftBatch batch = batch { + requestedTxBatchSet = IntSet.map (+ offset) (requestedTxBatchSet batch) + } + -- Intern a list of txids into an otherwise empty shared state. mkSharedState :: [TxId] -> SharedTxState PeerAddr TxId mkSharedState txids = snd (internTxIds txids emptySharedTxState) @@ -2396,10 +2772,6 @@ lookupPeerOrFail peeraddr st = Just sharedPeerState -> sharedPeerState Nothing -> error "TxLogic.lookupPeerOrFail: missing peer" --- Drop duplicate txids while keeping the first proposed size. -dedupeBatch :: [(TxId, SizeInBytes)] -> [(TxId, SizeInBytes)] -dedupeBatch = nubBy ((==) `on` fst) - -- Shift proposed txids forward until the batch is disjoint from the shared intern table. freshBatchAgainstSharedState :: SharedTxState PeerAddr TxId -> [(TxId, SizeInBytes)] -> [(TxId, SizeInBytes)] freshBatchAgainstSharedState sharedState = reverse . snd . foldl' step (reserved, []) @@ -2410,6 +2782,58 @@ freshBatchAgainstSharedState sharedState = reverse . snd . 
foldl' step (reserved let freshTxId = firstFreshTxId used txid in (Set.insert (getRawTxId freshTxId) used, (freshTxId, txSize) : acc) +-- Intern the given txids into the shared state and seed each into +-- sharedRetainedTxs. +seedRetainedTxids + :: [(TxId, SizeInBytes)] + -> SharedTxState PeerAddr TxId + -> SharedTxState PeerAddr TxId +seedRetainedTxids entries st0 = + stInterned { + sharedRetainedTxs = + foldl' (\r k -> retainedInsertMax k retainUntil r) + (sharedRetainedTxs stInterned) + retainedKeys + } + where + retainUntil = addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now + (_, stInterned) = internTxIds (fmap fst entries) st0 + retainedKeys = [ unTxKey (lookupKeyOrFail txid stInterned) + | (txid, _) <- entries + ] + +-- Intern the given txids and add an active sharedTxTable entry for each, +-- advertised by the given peer. +seedActiveTxidsForOtherPeer + :: PeerAddr + -> [(TxId, SizeInBytes)] + -> SharedTxState PeerAddr TxId + -> SharedTxState PeerAddr TxId +seedActiveTxidsForOtherPeer otherPeer entries st0 = + stInterned { + sharedTxTable = + foldl' (\tbl k -> IntMap.insert k activeEntry tbl) + (sharedTxTable stInterned) + activeKeys, + sharedPeers = + Map.adjust augmentAdvertised otherPeer (sharedPeers stInterned) + } + where + activeEntry = TxEntry { + txLease = TxClaimable now, + txAdvertiserCount = 1, + txAttempts = Map.empty + } + (_, stInterned) = internTxIds (fmap fst entries) st0 + activeKeys = [ unTxKey (lookupKeyOrFail txid stInterned) + | (txid, _) <- entries + ] + augmentAdvertised sps = sps { + sharedPeerAdvertisedTxKeys = + IntSet.union (sharedPeerAdvertisedTxKeys sps) + (IntSet.fromList activeKeys) + } + -- Find the first txid not present in the reserved set. 
firstFreshTxId :: Set.Set RawTxId -> TxId -> TxId firstFreshTxId used = go From c72847a98e55dd5026f78382eb16e151b82d3dcf Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Wed, 22 Apr 2026 14:25:28 +0200 Subject: [PATCH 37/67] fixup: merge tests into prop_handleReceivedTxs Merge prop_handleReceivedTxs_buffersAndDropsOmitted, prop_handleReceivedTxs_dropsLateBodies, prop_handleReceivedTxs_penalizesOmittedAfterPrune into prop_handleReceivedTxs. Improve tests to use arbitrary shared state, local state and a list of TXs. --- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 690 +++++++++++++----- 1 file changed, 520 insertions(+), 170 deletions(-) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index a49bfa72d8d..a2f44bc8233 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -34,6 +34,7 @@ import Data.IntMap.Strict qualified as IntMap import Data.IntSet qualified as IntSet import Data.List (nub, nubBy) import Data.Map.Strict qualified as Map +import Data.Maybe (isJust) import Data.Sequence.Strict qualified as StrictSeq import Data.Set qualified as Set import Data.Word (Word64) @@ -63,9 +64,7 @@ tests = [ testProperty "handleReceivedTxIds handles mixed new / retained / mempool txids" prop_handleReceivedTxIds , testCaseSteps "handleReceivedTxIds adds the current peer as an advertiser for active txs" unit_handleReceivedTxIds_addsAdvertiserForActiveTxs , testCaseSteps "nextPeerAction lets another peer claim a fresh tx when the first advertiser is full" unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull - , testProperty "handleReceivedTxs buffers received and drops omitted txs" prop_handleReceivedTxs_buffersAndDropsOmitted - , testProperty "handleReceivedTxs drops late bodies already retained or in mempool" 
prop_handleReceivedTxs_dropsLateBodies - , testProperty "handleReceivedTxs penalizes omitted txs after full prune" prop_handleReceivedTxs_penalizesOmittedAfterPrune + , testProperty "handleReceivedTxs handles mixed buffered / omitted / late-retained / late-mempool / pruned txids" prop_handleReceivedTxs , testProperty "handleSubmittedTxs retains accepted and drops rejected" prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected , testProperty "nextPeerAction prioritises submitting buffered owned txs" prop_nextPeerAction_prioritisesSubmit , testProperty "nextPeerAction claims claimable tx for best idle advertiser" prop_nextPeerAction_claimsClaimableTx @@ -373,6 +372,70 @@ instance Arbitrary TxIdGroupTag where , (1, pure TxIdMempoolResolvesActive) ] +-- | Per-requested-txid fate, driving the coherent pre-state for +-- 'handleReceivedTxs' properties: +-- +-- * 'RfBuffered': body is in the reply, entry is active with peeraddr's +-- TxDownloading attempt. Expected: attempt flipped to TxBuffered, body +-- added to peerDownloadedTxs. @Bool@ = co-advertised by another peer. +-- * 'RfOmitted': body is not in the reply, entry is active with peeraddr's +-- TxDownloading attempt. Expected: lease released; entry survives if +-- co-advertised, otherwise reaped by dropDeadActiveKeys. @Bool@ = +-- co-advertised. +-- * 'RfLateRetained': body is in the reply, but the key is already in +-- sharedRetainedTxs (no sharedTxTable entry). Expected: body dropped, +-- lateCount incremented, no state change beyond the usual peer bookkeeping. +-- * 'RfLateMempool': body is in the reply, entry is active with peeraddr's +-- TxDownloading attempt, and the callback reports the tx as already in +-- the mempool. Expected: body dropped, entry moved from sharedTxTable +-- to sharedRetainedTxs, advertising stripped, any other advertiser woken. 
+-- * 'RfOmittedPruned': body is not in the reply, and the key is not in +-- sharedState at all (fully pruned by some concurrent cleanup before +-- the reply arrives). Expected: omittedCount incremented, no shared-state +-- change (@keyWasLive@ is False so 'handleOmitted' takes the count-only +-- branch). +data RequestedFate + = RfBuffered !Bool + | RfOmitted !Bool + | RfLateRetained + | RfLateMempool + | RfOmittedPruned + deriving (Eq, Show) + +instance Arbitrary RequestedFate where + arbitrary = frequency + [ (4, pure (RfBuffered False)) + , (2, pure (RfBuffered True)) + , (3, pure (RfOmitted False)) + , (2, pure (RfOmitted True)) + , (1, pure RfLateRetained) + , (1, pure RfLateMempool) + , (1, pure RfOmittedPruned) + ] + +rfInReply :: RequestedFate -> Bool +rfInReply RfBuffered{} = True +rfInReply RfOmitted{} = False +rfInReply RfLateRetained = True +rfInReply RfLateMempool = True +rfInReply RfOmittedPruned = False + +rfCoAdvertised :: RequestedFate -> Bool +rfCoAdvertised (RfBuffered c) = c +rfCoAdvertised (RfOmitted c) = c +rfCoAdvertised _ = False + +rfGoesToActive :: RequestedFate -> Bool +rfGoesToActive RfBuffered{} = True +rfGoesToActive RfOmitted{} = True +rfGoesToActive RfLateMempool = True +rfGoesToActive RfLateRetained = False +rfGoesToActive RfOmittedPruned = False + +rfIsPruned :: RequestedFate -> Bool +rfIsPruned RfOmittedPruned = True +rfIsPruned _ = False + instance Arbitrary ArbTxDecisionPolicy where arbitrary = ArbTxDecisionPolicy <$> ( @@ -870,178 +933,407 @@ unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull step = do } peerBState0 = emptyPeerTxLocalState { peerRequestedTxIds = requestedToReply } --- Verifies that handleReceivedTxs buffers received bodies and removes omitted --- requested txs from peer and shared state. 
-prop_handleReceivedTxs_buffersAndDropsOmitted - :: Positive Int - -> TxId - -> TxId - -> Positive Int - -> Positive Int +-- Verifies that handleReceivedTxs resolves each requested txid according to +-- its 'RequestedFate': 'RfBuffered' flips the attempt to 'TxBuffered' and +-- inserts the body; 'RfOmitted' releases peeraddr's lease with the entry +-- surviving iff co-advertised; 'RfLateRetained' drops the body and counts +-- a late reply with no further state change; 'RfLateMempool' drops the +-- body, moves the active entry to sharedRetainedTxs, and strips advertising; +-- 'RfOmittedPruned' counts a penalty even though the key has already been +-- fully pruned from shared state. Aggregate 'omittedCount' and 'lateCount' +-- match the group sizes, peer-local fields handleReceivedTxs does not touch +-- are preserved, and the combined invariant holds before and after. +prop_handleReceivedTxs + :: ArbSharedTxState + -> ArbPeerTxLocalState + -> NonEmptyList (TxId, Positive Int, RequestedFate) -> Property -prop_handleReceivedTxs_buffersAndDropsOmitted (Positive peeraddr) txidA0 txidB0 txSizeA0 txSizeB0 = - txidA /= txidB ==> +prop_handleReceivedTxs + (ArbSharedTxState sharedState0) + (ArbPeerTxLocalState peerStateGenerated) + (NonEmpty requestedInput) = + forAll (genPeerAddrBiased sharedState0) $ \peeraddr -> + let sharedStateWithPeer = ensurePeerAdvertisesTxKeys peeraddr [] sharedState0 + + dedupedTagged :: [((TxId, SizeInBytes), RequestedFate)] + dedupedTagged = + nubBy ((==) `on` (fst . 
fst)) + [ ((abs txid + 1, mkSize txSize), fate) + | (txid, txSize, fate) <- requestedInput + ] + + freshenedTxids :: [(TxId, SizeInBytes)] + freshenedTxids = + freshBatchAgainstSharedState sharedStateWithPeer (fmap fst dedupedTagged) + + taggedFreshened :: [((TxId, SizeInBytes), RequestedFate)] + taggedFreshened = zip freshenedTxids (fmap snd dedupedTagged) + + otherPeerOpt :: Maybe PeerAddr + otherPeerOpt = + case filter (/= peeraddr) (Map.keys (sharedPeers sharedStateWithPeer)) of + [] -> Nothing + (p:_) -> Just p + + -- Partition by fate. + bufferedGroup, omittedGroup, lateRetainedGroup, lateMempoolGroup, prunedGroup + :: [((TxId, SizeInBytes), RequestedFate)] + bufferedGroup = [ e | e@(_, RfBuffered{}) <- taggedFreshened ] + omittedGroup = [ e | e@(_, RfOmitted{}) <- taggedFreshened ] + lateRetainedGroup = [ e | e@(_, RfLateRetained) <- taggedFreshened ] + lateMempoolGroup = [ e | e@(_, RfLateMempool) <- taggedFreshened ] + prunedGroup = [ e | e@(_, RfOmittedPruned) <- taggedFreshened ] + + -- Entries that need an active sharedTxTable seed, tagged with whether + -- the entry is co-advertised by otherPeer. LateMempool keys seed an + -- active entry (single advertiser) so the mempool branch of handleOne + -- hits the Just lookup and fires removeAdvertisingPeersForResolvedTx. + activeSeedTagged :: [((TxId, SizeInBytes), Bool)] + activeSeedTagged = + [ (fst e, rfCoAdvertised (snd e)) + | e <- taggedFreshened + , rfGoesToActive (snd e) + ] + + -- Seed retained first (no sharedTxTable entry, no advertising), then + -- active on top (leased to peeraddr with a TxDownloading attempt, plus + -- otherPeer advertising for co-advertised ones). Pruned entries are + -- deliberately not seeded: their keys live only in the peer's local + -- bookkeeping. 
+ sharedStateWithLateRetained = + seedRetainedTxids (fmap fst lateRetainedGroup) sharedStateWithPeer + sharedStateBase = + seedRequestedActiveTxids peeraddr otherPeerOpt activeSeedTagged + sharedStateWithLateRetained + + -- Synthetic keys for pruned entries, chosen to land above every key + -- that 'sharedStateBase' already uses so they don't collide with + -- anything interned. These keys never appear in any shared-state map. + prunedAllocations :: [(((TxId, SizeInBytes), RequestedFate), Int)] + prunedAllocations = + zip prunedGroup [ sharedNextTxKey sharedStateBase + i + | i <- [0 .. length prunedGroup - 1] ] + + -- Resolve each tagged entry to an Int key: interned entries look their + -- key up in sharedStateBase; pruned entries use their synthetic key. + prunedKeyByTxId :: Map.Map TxId Int + prunedKeyByTxId = + Map.fromList [ (txid, k) | (((txid, _), _), k) <- prunedAllocations ] + + keyIntOf :: ((TxId, SizeInBytes), RequestedFate) -> Int + keyIntOf ((txid, _), fate) + | rfIsPruned fate = prunedKeyByTxId Map.! txid + | otherwise = unTxKey (lookupKeyOrFail txid sharedStateBase) + + -- All requested keys, in input order. + requestedKeyInts :: [Int] + requestedKeyInts = [ keyIntOf e | e <- taggedFreshened ] + requestedKeys :: [TxKey] + requestedKeys = fmap TxKey requestedKeyInts + requestedKeysSet :: IntSet.IntSet + requestedKeysSet = IntSet.fromList requestedKeyInts + requestedAvailableMap :: IntMap.IntMap SizeInBytes + requestedAvailableMap = + IntMap.fromList (zip requestedKeyInts (fmap (snd . fst) taggedFreshened)) + requestedTotalSize :: SizeInBytes + requestedTotalSize = sum (fmap (snd . fst) taggedFreshened) + requestedBatch = mkRequestedTxBatch requestedKeys requestedTotalSize + + -- Keys genuinely co-advertised by otherPeer (only Buffered/Omitted tags + -- can be co-advertised and only if otherPeerOpt is Just). 
+ coAdvertisedKeys :: IntSet.IntSet + coAdvertisedKeys = + IntSet.fromList + [ keyIntOf e + | e@(_, fate) <- taggedFreshened + , rfCoAdvertised fate + , isJust otherPeerOpt + ] + + -- Omitted entries that survive (co-advertised) vs get reaped (solo). + omittedSurvivingKeys, omittedReapedKeys, lateMempoolKeys :: IntSet.IntSet + omittedSurvivingKeys = + IntSet.fromList [ keyIntOf e | e <- omittedGroup, keyIntOf e `IntSet.member` coAdvertisedKeys ] + omittedReapedKeys = + IntSet.fromList [ keyIntOf e | e <- omittedGroup, not (keyIntOf e `IntSet.member` coAdvertisedKeys) ] + lateMempoolKeys = + IntSet.fromList [ keyIntOf e | e <- lateMempoolGroup ] + + -- mempoolHasTx returns True exactly for the LateMempool group's txids. + mempoolTxidSet :: Set.Set TxId + mempoolTxidSet = Set.fromList (fmap (fst . fst) lateMempoolGroup) + mempoolHasTxFn :: TxId -> Bool + mempoolHasTxFn = (`Set.member` mempoolTxidSet) + + -- Body list submitted to handleReceivedTxs: every in-reply entry. + receivedBodies :: [(TxId, Tx TxId)] + receivedBodies = + [ (txid, mkTx txid size) + | ((txid, size), fate) <- taggedFreshened + , rfInReply fate + ] + + -- Peer-local state. Shift the generator's keys out of range, then + -- prepend advertised-from-sharedState0 keys plus our requested keys to + -- the unack queue, and overwrite the request bookkeeping with a single + -- batch of our requested keys (handleReceivedTxs only processes the + -- head batch). The shift must also land above the pruned keys, which + -- were allocated starting at 'sharedNextTxKey sharedStateBase'. 
+ peerKeyShift = + sharedNextTxKey sharedStateBase + length prunedGroup + 1 + preExistingAdvertised = + sharedPeerAdvertisedTxKeys + (lookupPeerOrFail peeraddr sharedStateWithPeer) + unackPrefix :: [TxKey] + unackPrefix = + [ TxKey k | k <- IntSet.toList preExistingAdvertised ] + ++ requestedKeys + peerStateShifted = shiftPeerTxLocalStateKeys peerKeyShift peerStateGenerated + peerState0 = peerStateShifted { + peerUnacknowledgedTxIds = + StrictSeq.fromList unackPrefix + <> peerUnacknowledgedTxIds peerStateShifted, + peerAvailableTxIds = requestedAvailableMap, + peerRequestedTxs = requestedKeysSet, + peerRequestedTxBatches = StrictSeq.singleton requestedBatch, + peerRequestedTxsSize = requestedTotalSize + } + + (omittedCount, lateCount, peerState', sharedState') = + handleReceivedTxs mempoolHasTxFn now defaultTxDecisionPolicy peeraddr + receivedBodies peerState0 sharedStateBase + + expectedRetainUntil = + addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now + + -- Per-fate assertions. 
+ checkBufferedEntry ((txid, size), _) = + let k = unTxKey (lookupKeyOrFail txid sharedState') in + conjoin + [ counterexample "buffered: peeraddr attempt not TxBuffered" + (fmap (\TxEntry { txAttempts } -> Map.lookup peeraddr txAttempts) + (IntMap.lookup k (sharedTxTable sharedState')) + === Just (Just TxBuffered)) + , counterexample "buffered: body missing from peerDownloadedTxs" + (fmap getTxId (IntMap.lookup k (peerDownloadedTxs peerState')) + === Just txid) + , counterexample "buffered: body has wrong size" + (fmap getTxSize (IntMap.lookup k (peerDownloadedTxs peerState')) + === Just size) + ] + + checkOmittedSurvivingEntry ((txid, _), _) = + let k = unTxKey (lookupKeyOrFail txid sharedState') in + case IntMap.lookup k (sharedTxTable sharedState') of + Nothing -> + counterexample ("co-adv omitted entry was reaped for " ++ show txid) + (property False) + Just TxEntry { txLease, txAdvertiserCount, txAttempts } -> + conjoin + [ counterexample "co-adv omitted: lease not demoted" + (txLease === TxClaimable now) + , counterexample "co-adv omitted: advertiser count not decremented" + (txAdvertiserCount === 1) + , counterexample "co-adv omitted: peeraddr attempt not cleared" + (property (Map.notMember peeraddr txAttempts)) + ] + + checkOmittedReapedEntry ((txid, _), _) = + let k = unTxKey (lookupKeyOrFail txid sharedStateBase) + rawId = getRawTxId txid + in conjoin + [ counterexample "reaped omitted: still in sharedTxTable" + (property (IntMap.notMember k (sharedTxTable sharedState'))) + , counterexample "reaped omitted: leaked into sharedRetainedTxs" + (property (not (retainedMember k (sharedRetainedTxs sharedState')))) + , counterexample "reaped omitted: still in sharedKeyToTxId" + (property (IntMap.notMember k (sharedKeyToTxId sharedState'))) + , counterexample "reaped omitted: still in sharedTxIdToKey" + (property (Map.notMember rawId (sharedTxIdToKey sharedState'))) + ] + + -- LateRetained: sharedRetainedTxs entry untouched; no sharedTxTable + -- entry exists or 
is created; body was dropped. + checkLateRetainedEntry ((txid, _), _) = + let k = unTxKey (lookupKeyOrFail txid sharedStateBase) in + conjoin + [ counterexample "late-retained: leaked into sharedTxTable" + (property (IntMap.notMember k (sharedTxTable sharedState'))) + , counterexample "late-retained: retain-until mismatch" + (retainedLookup k (sharedRetainedTxs sharedState') + === Just expectedRetainUntil) + , counterexample "late-retained: leaked into peerDownloadedTxs" + (property (IntMap.notMember k (peerDownloadedTxs peerState'))) + ] + + -- LateMempool: active entry moved from sharedTxTable to + -- sharedRetainedTxs; peeraddr's advertising stripped. + checkLateMempoolEntry ((txid, _), _) = + let k = unTxKey (lookupKeyOrFail txid sharedStateBase) in + conjoin + [ counterexample "late-mempool: still in sharedTxTable" + (property (IntMap.notMember k (sharedTxTable sharedState'))) + , counterexample "late-mempool: retain-until mismatch" + (retainedLookup k (sharedRetainedTxs sharedState') + === Just expectedRetainUntil) + , counterexample "late-mempool: leaked into peerDownloadedTxs" + (property (IntMap.notMember k (peerDownloadedTxs peerState'))) + , counterexample "late-mempool: still in peer advertised keys" + (property (IntSet.notMember k + (sharedPeerAdvertisedTxKeys + (lookupPeerOrFail peeraddr sharedState')))) + ] + + -- Pruned: synthetic key was never interned; handleOmitted takes the + -- count-only branch and leaves shared state untouched. 
+ checkPrunedEntry (((txid, _), _), k) = + let rawId = getRawTxId txid in + conjoin + [ counterexample "pruned: leaked into sharedTxTable" + (property (IntMap.notMember k (sharedTxTable sharedState'))) + , counterexample "pruned: leaked into sharedRetainedTxs" + (property (not (retainedMember k (sharedRetainedTxs sharedState')))) + , counterexample "pruned: leaked into sharedKeyToTxId" + (property (IntMap.notMember k (sharedKeyToTxId sharedState'))) + , counterexample "pruned: leaked into sharedTxIdToKey" + (property (Map.notMember rawId (sharedTxIdToKey sharedState'))) + , counterexample "pruned: leaked into peerDownloadedTxs" + (property (IntMap.notMember k (peerDownloadedTxs peerState'))) + ] + + -- Expected peeraddr advertised keys post-call: pre-existing plus only + -- the buffered group's keys. Omitted-group advertising is stripped by + -- removeOmittedAdvertisedKeys; LateMempool advertising is stripped by + -- removeAdvertisingPeersForResolvedTx; LateRetained never advertised. + expectedPeerAdvertisedPost = + preExistingAdvertised + `IntSet.union` + IntSet.fromList + [ unTxKey (lookupKeyOrFail txid sharedState') + | ((txid, _), _) <- bufferedGroup + ] + + -- otherPeer is added to wakePeers when any surviving omitted entry + -- exists; its generation is bumped iff it was PeerIdle. LateMempool + -- is set up single-advertiser so it does not wake otherPeer here. 
+ checkOtherPeerState = + case otherPeerOpt of + Just op | not (IntSet.null omittedSurvivingKeys) -> + let original = lookupPeerOrFail op sharedStateBase + post = lookupPeerOrFail op sharedState' + bumpIfIdle g + | sharedPeerPhase original == PeerIdle = g + 1 + | otherwise = g + in conjoin + [ counterexample "other peer's advertised keys changed" + (sharedPeerAdvertisedTxKeys post + === sharedPeerAdvertisedTxKeys original) + , counterexample "other peer's phase changed" + (sharedPeerPhase post === sharedPeerPhase original) + , counterexample "other peer's generation bump mismatch" + (sharedPeerGeneration post + === bumpIfIdle (sharedPeerGeneration original)) + ] + _ -> property True + + affectedPeers :: Set.Set PeerAddr + affectedPeers = + Set.insert peeraddr $ + case otherPeerOpt of + Just op | not (IntSet.null omittedSurvivingKeys) -> Set.singleton op + _ -> Set.empty + + -- Key sets used for the "unchanged on stable old keys" assertions. + -- Every requested key has its sharedTxTable slot touched (added, + -- removed, or modified), so they are all excluded from stableForTxTable. + -- sharedKeyToTxId only loses reaped keys; sharedRetainedTxs only gains + -- LateMempool keys (and keeps the LateRetained pre-seed entries + -- unchanged). + oldKeys = IntMap.keysSet (sharedKeyToTxId sharedStateBase) + stableForKeyMaps = oldKeys `IntSet.difference` omittedReapedKeys + stableForTxTable = oldKeys `IntSet.difference` requestedKeysSet + stableForRetained = oldKeys `IntSet.difference` lateMempoolKeys + + -- Generated peerDownloadedTxs keys (shifted) should survive untouched. 
+ genDownloadedKeys = + IntMap.keysSet (peerDownloadedTxs peerStateShifted) in + classify (StrictSeq.null (peerUnacknowledgedTxIds peerStateGenerated)) + "generated peer-local state: empty unacknowledged queue" $ + classify (not (Map.member peeraddr (sharedPeers sharedState0))) + "peeraddr: fresh (not in generated sharedState)" $ + classify (not (IntSet.null coAdvertisedKeys)) + "requested txs include co-advertised" $ + classify (not (null bufferedGroup)) "requested txs include buffered" $ + classify (not (null omittedGroup)) "requested txs include omitted" $ + classify (not (IntSet.null omittedSurvivingKeys)) + "requested txs include omitted + surviving" $ + classify (not (IntSet.null omittedReapedKeys)) + "requested txs include omitted + reaped" $ + classify (not (null lateRetainedGroup)) "requested txs include late-retained" $ + classify (not (null lateMempoolGroup)) "requested txs include late-mempool" $ + classify (not (null prunedGroup)) "requested txs include pruned" $ + tabulate "requested" [bucket (length freshenedTxids)] $ + tabulate "buffered" [bucket (length bufferedGroup)] $ + tabulate "omitted" [bucket (length omittedGroup)] $ + tabulate "late-retained" [bucket (length lateRetainedGroup)] $ + tabulate "late-mempool" [bucket (length lateMempoolGroup)] $ + tabulate "pruned" [bucket (length prunedGroup)] $ + tabulate "sharedState peers" [bucket (Map.size (sharedPeers sharedStateBase))] $ conjoin - [ omittedCount === 1 - , lateCount === 0 + [ omittedCount === length omittedGroup + length prunedGroup + , lateCount === length lateRetainedGroup + length lateMempoolGroup , peerRequestedTxs peerState' === IntSet.empty + , peerRequestedTxBatches peerState' === StrictSeq.empty , peerRequestedTxsSize peerState' === 0 - , peerDownloadedTxs peerState' === IntMap.singleton kA txA , peerAvailableTxIds peerState' === IntMap.empty - , fmap (Map.lookup peeraddr . 
txAttempts) (IntMap.lookup kA (sharedTxTable sharedState')) === Just (Just TxBuffered) - , IntMap.lookup kB (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) - , retainedLookup kB (sharedRetainedTxs sharedState') === Nothing - , Map.lookup (getRawTxId txidB) (sharedTxIdToKey sharedState') === Nothing - , IntMap.lookup kB (sharedKeyToTxId sharedState') === Nothing - , sharedGeneration sharedState' === 1 + , peerUnacknowledgedTxIds peerState' === peerUnacknowledgedTxIds peerState0 + , peerRequestedTxIds peerState' === peerRequestedTxIds peerState0 + , peerDownloadStartTime peerState' === peerDownloadStartTime peerState0 + , peerScore peerState' === peerScore peerState0 + , counterexample "generated peerDownloadedTxs entries not preserved" + (IntMap.restrictKeys (peerDownloadedTxs peerState') genDownloadedKeys + === peerDownloadedTxs peerStateShifted) + , counterexample "buffered bodies not all inserted into peerDownloadedTxs" + (IntMap.restrictKeys (peerDownloadedTxs peerState') + (IntSet.fromList + [ unTxKey (lookupKeyOrFail txid sharedState') + | ((txid, _), _) <- bufferedGroup + ]) + === IntMap.fromList + [ (unTxKey (lookupKeyOrFail txid sharedState'), mkTx txid size) + | ((txid, size), _) <- bufferedGroup + ]) + , Map.withoutKeys (sharedPeers sharedState') affectedPeers + === Map.withoutKeys (sharedPeers sharedStateBase) affectedPeers + , sharedPeerAdvertisedTxKeys (lookupPeerOrFail peeraddr sharedState') + === expectedPeerAdvertisedPost + , IntMap.restrictKeys (sharedTxTable sharedState') stableForTxTable + === IntMap.restrictKeys (sharedTxTable sharedStateBase) stableForTxTable + , IntMap.restrictKeys (sharedKeyToTxId sharedState') stableForKeyMaps + === IntMap.restrictKeys (sharedKeyToTxId sharedStateBase) stableForKeyMaps + , retainedRestrictKeys (sharedRetainedTxs sharedState') stableForRetained + === retainedRestrictKeys (sharedRetainedTxs sharedStateBase) stableForRetained + , sharedGeneration sharedState' === sharedGeneration 
sharedStateBase + 1 + , conjoin (fmap checkBufferedEntry bufferedGroup) + , conjoin [ checkOmittedSurvivingEntry e + | e <- omittedGroup + , keyIntOf e `IntSet.member` coAdvertisedKeys + ] + , conjoin [ checkOmittedReapedEntry e + | e <- omittedGroup + , not (keyIntOf e `IntSet.member` coAdvertisedKeys) + ] + , conjoin (fmap checkLateRetainedEntry lateRetainedGroup) + , conjoin (fmap checkLateMempoolEntry lateMempoolGroup) + , conjoin (fmap checkPrunedEntry prunedAllocations) + , checkOtherPeerState + , counterexample "combined invariant violated before the call" + (combinedStateInvariant StrongInvariant peeraddr peerState0 sharedStateBase) + , counterexample "combined invariant violated after the call" + (combinedStateInvariant StrongInvariant peeraddr peerState' sharedState') , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) , checkNoThunks "sharedState'" sharedState' ] - where - txidA = abs txidA0 + 1 - txidB = abs txidB0 + 2 - txSizeA = mkSize txSizeA0 - txSizeB = mkSize txSizeB0 - txA = mkTx txidA txSizeA - sharedState0 = - let st = mkSharedState [txidA, txidB] - keyA' = lookupKeyOrFail txidA st - keyB' = lookupKeyOrFail txidB st in - ensurePeerAdvertisesTxKeys peeraddr [keyA', keyB'] $ - st { - sharedTxTable = IntMap.fromList - [ (unTxKey keyA', mkTxEntry peeraddr txSizeA (Just TxDownloading)) - , (unTxKey keyB', mkTxEntry peeraddr txSizeB (Just TxDownloading)) - ] - } - keyA = lookupKeyOrFail txidA sharedState0 - keyB = lookupKeyOrFail txidB sharedState0 - kA = unTxKey keyA - kB = unTxKey keyB - peerState0 = emptyPeerTxLocalState - { peerAvailableTxIds = IntMap.fromList [(kA, txSizeA), (kB, txSizeB)] - , peerRequestedTxs = IntSet.fromList [kA, kB] - , peerRequestedTxBatches = StrictSeq.singleton (mkRequestedTxBatch [keyA, keyB] (txSizeA + txSizeB)) - , peerRequestedTxsSize = txSizeA + txSizeB - } - (omittedCount, lateCount, peerState', sharedState') = handleReceivedTxs (const False) now defaultTxDecisionPolicy peeraddr [(txidA, txA)] 
peerState0 sharedState0 - --- Verifies that handleReceivedTxs drops late bodies when the tx is already --- retained or already present in the mempool. -prop_handleReceivedTxs_dropsLateBodies - :: Positive Int - -> TxId - -> Positive Int - -> Property -prop_handleReceivedTxs_dropsLateBodies (Positive peeraddr) txid0 txSize0 = - conjoin - [ omittedRetained === 0 - , lateRetained === 1 - , peerStateRetained' === peerState0 - { peerAvailableTxIds = IntMap.empty - , peerRequestedTxs = IntSet.empty - , peerRequestedTxBatches = StrictSeq.empty - , peerRequestedTxsSize = 0 - } - , sharedTxTable sharedStateRetained' === IntMap.empty - , retainedLookup k (sharedRetainedTxs sharedStateRetained') === Just retainedUntil - , omittedMempool === 0 - , lateMempool === 1 - , peerStateMempool' === peerState0 - { peerAvailableTxIds = IntMap.empty - , peerRequestedTxs = IntSet.empty - , peerRequestedTxBatches = StrictSeq.empty - , peerRequestedTxsSize = 0 - } - , sharedTxTable sharedStateMempool' === IntMap.empty - , retainedLookup k (sharedRetainedTxs sharedStateMempool') === Just retainedUntil - , checkNoThunks "peerStateRetained" (peerStateRetained' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "peerStateMempool" (peerStateMempool' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "sharedStateRetained" sharedStateRetained' - , checkNoThunks "sharedStateMempool" sharedStateMempool' - ] - where - txid = abs txid0 + 1 - txSize = mkSize txSize0 - tx = mkTx txid txSize - sharedStateBase = - let st = mkSharedState [txid] - key' = lookupKeyOrFail txid st in - ensurePeerAdvertisesTxKeys peeraddr [key'] $ - st { - sharedTxTable = IntMap.singleton (unTxKey key') (mkTxEntry peeraddr txSize Nothing) - } - key = lookupKeyOrFail txid sharedStateBase - k = unTxKey key - retainedUntil = addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now - peerState0 :: PeerTxLocalState (Tx TxId) - peerState0 = emptyPeerTxLocalState - { peerAvailableTxIds = IntMap.singleton k txSize - , peerRequestedTxs 
= IntSet.singleton k - , peerRequestedTxBatches = StrictSeq.singleton (mkRequestedTxBatch [key] txSize) - , peerRequestedTxsSize = txSize - } - sharedStateRetained0 = sharedStateBase { - sharedTxTable = IntMap.empty, - sharedRetainedTxs = retainedSingleton k retainedUntil - } - (omittedRetained, lateRetained, peerStateRetained', sharedStateRetained') = - handleReceivedTxs (const False) now defaultTxDecisionPolicy peeraddr [(txid, tx)] peerState0 sharedStateRetained0 - (omittedMempool, lateMempool, peerStateMempool', sharedStateMempool') = - handleReceivedTxs (== txid) now defaultTxDecisionPolicy peeraddr [(txid, tx)] peerState0 sharedStateBase - --- Verifies that omitting a requested body still counts as a penalty even if --- the tx has already been fully pruned from shared state by the time the --- reply is processed. -prop_handleReceivedTxs_penalizesOmittedAfterPrune - :: Positive Int - -> TxId - -> Positive Int - -> Property -prop_handleReceivedTxs_penalizesOmittedAfterPrune (Positive peeraddr) txid0 txSize0 = - conjoin - [ omittedCount === 1 - , lateCount === 0 - , peerAvailableTxIds peerState' === IntMap.empty - , peerRequestedTxs peerState' === IntSet.empty - , peerRequestedTxBatches peerState' === StrictSeq.empty - , peerRequestedTxsSize peerState' === 0 - , peerDownloadedTxs peerState' === (IntMap.empty :: IntMap.IntMap (Tx TxId)) - , sharedTxTable sharedState' === IntMap.empty - , retainedLookup k (sharedRetainedTxs sharedState') === Nothing - , Map.lookup (getRawTxId txid) (sharedTxIdToKey sharedState') === Nothing - , IntMap.lookup k (sharedKeyToTxId sharedState') === Nothing - , sharedGeneration sharedState' === 1 - , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "sharedState'" sharedState' - ] - where - txid = abs txid0 + 1 - txSize = mkSize txSize0 - sharedStateBase = - let st = mkSharedState [txid] - key' = lookupKeyOrFail txid st in - ensurePeerAdvertisesTxKeys peeraddr [key'] $ - st { - sharedTxTable = 
IntMap.singleton (unTxKey key') (mkTxEntry peeraddr txSize (Just TxDownloading)) - } - key = lookupKeyOrFail txid sharedStateBase - k = unTxKey key - peerState0 = emptyPeerTxLocalState - { peerAvailableTxIds = IntMap.singleton k txSize - , peerRequestedTxs = IntSet.singleton k - , peerRequestedTxBatches = StrictSeq.singleton (mkRequestedTxBatch [key] txSize) - , peerRequestedTxsSize = txSize - } - sharedStatePruned :: SharedTxState PeerAddr TxId - sharedStatePruned = sharedStateBase - { sharedTxTable = IntMap.empty - , sharedRetainedTxs = retainedEmpty - , sharedTxIdToKey = Map.empty - , sharedKeyToTxId = IntMap.empty - } - (omittedCount, lateCount, peerState', sharedState') = - handleReceivedTxs (const False) now defaultTxDecisionPolicy peeraddr [] peerState0 sharedStatePruned -- Verifies that handleSubmittedTxs retains accepted txs and removes rejected -- txs from the active table and tx-key maps. @@ -2269,9 +2561,7 @@ genPeerTxLocalState = sized $ \n -> do txSize <- genPositiveSize pure (unTxKey key, mkTx (txIdForKey key) txSize) -data PeerSeed = PeerSeed { - peerSeedGeneration :: !Word64 - } +newtype PeerSeed = PeerSeed { peerSeedGeneration :: Word64 } data PeerDerivedUsage = PeerDerivedUsage { peerHasSubmitting :: !Bool @@ -2834,6 +3124,66 @@ seedActiveTxidsForOtherPeer otherPeer entries st0 = (IntSet.fromList activeKeys) } +-- Intern each requested txid and add an active sharedTxTable entry leased to +-- @peeraddr@ with a TxDownloading attempt. Entries whose Bool tag is True +-- (co-advertised) are also advertised by @otherPeer@, giving them +-- @txAdvertiserCount = 2@ so the omitted-and-released path leaves them +-- alive (instead of being reaped by dropDeadActiveKeys). Used by the +-- handleReceivedTxs property to build a coherent pre-state for one +-- outstanding request batch. 
+seedRequestedActiveTxids + :: PeerAddr + -> Maybe PeerAddr + -> [((TxId, SizeInBytes), Bool)] + -> SharedTxState PeerAddr TxId + -> SharedTxState PeerAddr TxId +seedRequestedActiveTxids peeraddr otherPeerOpt tagged st0 = + stFinal + where + entries = fmap fst tagged + (_, stInterned) = internTxIds (fmap fst entries) st0 + leaseUntil = addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now + + perKey :: [(Int, Bool)] + perKey = + [ ( unTxKey (lookupKeyOrFail txid stInterned) + , coAdv && isJust otherPeerOpt + ) + | ((txid, _), coAdv) <- tagged + ] + + mkEntry coAdv = TxEntry { + txLease = TxLeased peeraddr leaseUntil, + txAdvertiserCount = if coAdv then 2 else 1, + txAttempts = Map.singleton peeraddr TxDownloading + } + + stWithTable = stInterned { + sharedTxTable = + foldl' (\tbl (k, coAdv) -> IntMap.insert k (mkEntry coAdv) tbl) + (sharedTxTable stInterned) + perKey + } + + peerAdvertisedAll = IntSet.fromList (map fst perKey) + otherAdvertisedAll = IntSet.fromList [ k | (k, True) <- perKey ] + + augmentWith addKeys sps = sps { + sharedPeerAdvertisedTxKeys = + IntSet.union (sharedPeerAdvertisedTxKeys sps) addKeys + } + + stFinal = stWithTable { + sharedPeers = + let withPeer = + Map.adjust (augmentWith peerAdvertisedAll) peeraddr + (sharedPeers stWithTable) + in case otherPeerOpt of + Just op | not (IntSet.null otherAdvertisedAll) -> + Map.adjust (augmentWith otherAdvertisedAll) op withPeer + _ -> withPeer + } + -- Find the first txid not present in the reserved set. firstFreshTxId :: Set.Set RawTxId -> TxId -> TxId firstFreshTxId used = go From d4018a57c515c62e2fcafdb5e25d60a768f97d7b Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Wed, 22 Apr 2026 16:18:22 +0200 Subject: [PATCH 38/67] fixup: remove mempool read arg The mempool reader is not used anymore. 
--- .../Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs | 1 - ouroboros-network/bench/Bench/TxSubmissionV2Server.hs | 1 - .../lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs | 3 --- .../tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs | 1 - 4 files changed, 6 deletions(-) diff --git a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs index b5f3dd93192..8a9101196e0 100644 --- a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs +++ b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs @@ -730,7 +730,6 @@ applications debugTracer txSubmissionInboundTracer _txSubmissionInboundDebug nod txSubmissionInboundTracer NoTxSubmissionInitDelay aaTxDecisionPolicy - (getMempoolReader mempool) (getMempoolWriter duplicateTxVar mempool) getTxSize api diff --git a/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs b/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs index 038e7540bd4..ce4c42180c6 100644 --- a/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs +++ b/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs @@ -96,7 +96,6 @@ runDirectServerBenchmark nullTracer NoTxSubmissionInitDelay defaultTxDecisionPolicy - (getMempoolReader inboundMempool) (getMempoolWriter duplicateTxIdsVar inboundMempool) getTxSize api diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs index 02a98f8afcd..40b92398408 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs @@ -38,7 +38,6 @@ import Ouroboros.Network.TxSubmission.Inbound.V2.Policy import Ouroboros.Network.TxSubmission.Inbound.V2.Registry as V2 import Ouroboros.Network.TxSubmission.Inbound.V2.State qualified as State import 
Ouroboros.Network.TxSubmission.Inbound.V2.Types as V2 -import Ouroboros.Network.TxSubmission.Mempool.Reader -- The same Stateful types as V1 uses. newtype Stateful s n txid tx m = Stateful (s -> ServerStIdle n txid tx m ()) @@ -89,7 +88,6 @@ txSubmissionInboundV2 => Tracer m (TraceTxSubmissionInbound txid tx) -> TxSubmissionInitDelay -> TxDecisionPolicy - -> TxSubmissionMempoolReader txid tx idx m -> TxSubmissionMempoolWriter txid tx idx m err -> (tx -> SizeInBytes) -> PeerTxAPI m txid tx @@ -98,7 +96,6 @@ txSubmissionInboundV2 tracer initDelay policy - TxSubmissionMempoolReader {} TxSubmissionMempoolWriter { txId, mempoolAddTxs } txSize PeerTxAPI { diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs index b64a28c203e..81296c2419b 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs @@ -210,7 +210,6 @@ runTxSubmission tracer _tracerTxLogic st0 txDecisionPolicy = do txSubmissionInboundV2 sayTracer NoTxSubmissionInitDelay txDecisionPolicy - (getMempoolReader inboundMempool) (getMempoolWriter duplicateTxIdsVar inboundMempool) getTxSize api From 86f75fc01485da6a347cfc8eeb8f92a5b421594a Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Wed, 22 Apr 2026 16:19:23 +0200 Subject: [PATCH 39/67] fixup: inline some hotpath functions Inline some hot path functions. The old NOINLINE pragmas where a leftover from V1, and not applicable to V2 since we don't run unsafeNoThunks between states. 
--- .../lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs | 5 +++-- .../lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs | 6 ++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs index 40b92398408..3fc0f76efda 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs @@ -53,13 +53,14 @@ continueWithState :: Stateful s n txid tx m -> ServerStIdle n txid tx m () continueWithState (Stateful f) !st = f st +{-# INLINE continueWithState #-} continueWithStateM :: StatefulM s n txid tx m -> s -> m (ServerStIdle n txid tx m ()) continueWithStateM (StatefulM f) !st = f st -{-# NOINLINE continueWithStateM #-} +{-# INLINE continueWithStateM #-} collectAndContinueWithState :: StatefulCollect s n txid tx m -> s @@ -67,7 +68,7 @@ collectAndContinueWithState :: StatefulCollect s n txid tx m -> m (ServerStIdle n txid tx m ()) collectAndContinueWithState (StatefulCollect f) !st c = f st c -{-# NOINLINE collectAndContinueWithState #-} +{-# INLINE collectAndContinueWithState #-} -- | A tx-submission inbound side (server, sic!). 
-- diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index a2f504b7587..1cacbea7594 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -105,6 +105,7 @@ nextPeerAction :: (Ord peeraddr, HasRawTxId txid) -> SharedTxState peeraddr txid -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) nextPeerAction = nextPeerActionWithMode AllowAnyTxIdRequests +{-# INLINABLE nextPeerAction #-} -- | Pipelined version of nextPeerAction nextPeerActionPipelined :: (Ord peeraddr, HasRawTxId txid) @@ -115,6 +116,7 @@ nextPeerActionPipelined :: (Ord peeraddr, HasRawTxId txid) -> SharedTxState peeraddr txid -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) nextPeerActionPipelined = nextPeerActionWithMode AllowPipelinedTxIdRequests +{-# INLINABLE nextPeerActionPipelined #-} -- | V2 peer-thread scheduler -- @@ -752,6 +754,7 @@ sweepSharedState now st isOrphan TxEntry { txLease = TxLeased {} } = False isOrphan TxEntry { txAdvertiserCount, txAttempts } = txAdvertiserCount == 0 && Map.null txAttempts +{-# INLINABLE sweepSharedState #-} -- | Is the TX entry alive? -- @@ -1077,6 +1080,7 @@ handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState = else txAdvertiserCount, txAttempts = Map.delete peeraddr txAttempts } +{-# INLINABLE handleReceivedTxs #-} -- | Handle the result of submitting buffered txs to the mempool. @@ -1181,6 +1185,7 @@ handleSubmittedTxs now policy peeraddr acceptedTxs rejectedTxs peerState sharedS else txAdvertiserCount, txAttempts = Map.delete peeraddr txAttempts } +{-# INLINABLE handleSubmittedTxs #-} -- | Mark buffered txs as entering mempool submission. 
@@ -1404,3 +1409,4 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize } ) where rawId = getRawTxId txid +{-# INLINABLE handleReceivedTxIds #-} From cddef432bddc0788944522e426c0b76df7d7eec2 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Thu, 23 Apr 2026 14:34:52 +0200 Subject: [PATCH 40/67] fixup: all advertise should be notified Not only idle peers but all peers should be notified when something changes for one of their TX.s --- .../TxSubmission/Inbound/V2/Registry.hs | 4 +- .../Network/TxSubmission/Inbound/V2/State.hs | 8 +- .../Network/TxSubmission/Inbound/V2/Types.hs | 24 ++-- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 110 ++++++++++++++---- 4 files changed, 108 insertions(+), 38 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index a027d2f4448..65ae9904e59 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -203,7 +203,7 @@ withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar unregisterPeer :: Time -> SharedTxState peeraddr txid -> SharedTxState peeraddr txid unregisterPeer now st@SharedTxState { sharedPeers, sharedTxTable, sharedRetainedTxs, sharedTxIdToKey, sharedKeyToTxId, sharedGeneration } = - bumpIdlePeerGenerations peersToWake $ st { + bumpPeerGenerations peersToWake $ st { sharedPeers = sharedPeers', sharedTxTable = sharedTxTable', sharedRetainedTxs = sharedRetainedTxs, @@ -544,7 +544,7 @@ updatePeerPhase peeraddr peerPhaseNew let sharedPeerState' = sharedPeerState { sharedPeerPhase = peerPhaseNew } st' = st { sharedPeers = Map.insert peeraddr sharedPeerState' sharedPeers , sharedGeneration = sharedGeneration + 1 } - in bumpIdlePeerGenerations (phaseWakePeers peerPhaseOld) st' + in bumpPeerGenerations (phaseWakePeers peerPhaseOld) st' 
else st _ -> st -- TODO error? where diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 1cacbea7594..dd701e3e402 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -220,7 +220,7 @@ applyRequestTxsChoice ctx txsToRequest txsToRequestSize txTable = peerRequestedTxsSize = peerRequestedTxsSize (pacPeerState ctx) + txsToRequestSize } sharedState'' = - bumpIdlePeerGenerations + bumpPeerGenerations (advertisingPeersForTxKeysExcept (pacPeerAddr ctx) requestedKeys (pacSharedState ctx)) ((pacSharedState ctx) { sharedTxTable = txTable, @@ -952,7 +952,7 @@ handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState = -- Flag peers that may now have work available after processing txs. sharedState' = - bumpIdlePeerGenerations + bumpPeerGenerations (Set.union receivedWakePeers omittedWakePeers) sharedState'' @@ -1120,7 +1120,7 @@ handleSubmittedTxs now policy peeraddr acceptedTxs rejectedTxs peerState sharedS IntSet.foldl' updateRejected (sharedStateAfterRejectedPeer, Set.empty) rejectedKeys sharedState' = - bumpIdlePeerGenerations + bumpPeerGenerations (Set.union acceptedAdvertisers rejectedWakePeers) sharedState'' @@ -1274,7 +1274,7 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize sharedState'' | sharedChanged || peerAdvertisedKeys' /= peerAdvertisedKeys0 = - bumpIdlePeerGenerations peersToWake $ + bumpPeerGenerations peersToWake $ sharedStateHandled { sharedPeers = if peerAdvertisedKeys' == peerAdvertisedKeys0 diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index c8116d29416..d5936f2caa3 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ 
b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -59,7 +59,7 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.Types , PeerTxLocalState (..) , SharedPeerState (..) , peerGenerationOf - , bumpIdlePeerGenerations + , bumpPeerGenerations -- TxKey with helper functions , TxKey (..) , lookupTxKey @@ -565,22 +565,22 @@ peerGenerationOf peeraddr SharedTxState { sharedPeers } = Just SharedPeerState { sharedPeerGeneration } -> sharedPeerGeneration Nothing -> error "TxSubmission.V2.peerGenerationOf: missing peer" -bumpIdlePeerGenerations :: Ord peeraddr - => Set.Set peeraddr - -> SharedTxState peeraddr txid - -> SharedTxState peeraddr txid -bumpIdlePeerGenerations peersToWake st@SharedTxState { sharedPeers } = +bumpPeerGenerations :: Ord peeraddr + => Set.Set peeraddr + -> SharedTxState peeraddr txid + -> SharedTxState peeraddr txid +bumpPeerGenerations peersToWake st@SharedTxState { sharedPeers } = st { sharedPeers = foldl' bumpOne sharedPeers (Set.toList peersToWake) } where bumpOne peersMap peeraddr = - Map.adjust bumpIdlePeer peeraddr peersMap - where - bumpIdlePeer sharedPeerState@SharedPeerState { sharedPeerPhase, sharedPeerGeneration } - | sharedPeerPhase == PeerIdle = - sharedPeerState { sharedPeerGeneration = sharedPeerGeneration + 1 } - | otherwise = sharedPeerState + Map.adjust + (\sharedPeerState -> + sharedPeerState + { sharedPeerGeneration = sharedPeerGeneration sharedPeerState + 1 }) + peeraddr + peersMap lookupTxKey :: HasRawTxId txid => txid diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index a2f44bc8233..1b18ab469d4 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -68,6 +68,7 @@ tests = , testProperty "handleSubmittedTxs retains accepted and drops rejected" 
prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected , testProperty "nextPeerAction prioritises submitting buffered owned txs" prop_nextPeerAction_prioritisesSubmit , testProperty "nextPeerAction claims claimable tx for best idle advertiser" prop_nextPeerAction_claimsClaimableTx + , testProperty "nextPeerAction claims a released tx from another advertiser" prop_nextPeerAction_claimsRejectedTxFromOtherAdvertiser , testCaseSteps "nextPeerAction claims a tx once the score delay threshold has elapsed" unit_nextPeerAction_claimsAtScoreDelayThreshold , testProperty "nextPeerAction steals expired lease for best idle advertiser" prop_nextPeerAction_claimsExpiredLease , testProperty "nextPeerAction requests an oversized first tx within the soft budget" prop_nextPeerAction_requestsOversizedFirstTx @@ -85,7 +86,7 @@ tests = , testProperty "nextPeerAction keeps retained txs before expiry" prop_nextPeerAction_keepsRetained , testProperty "PeerDoNothing waits for the earliest shared expiry" prop_nextPeerAction_earliestWakeDelay , testProperty "PeerDoNothing uses the current peer generation" prop_nextPeerAction_returnsPeerGeneration - , testProperty "handleSubmittedTxs bumps idle advertiser generations" prop_handleSubmittedTxs_bumpsIdleAdvertisers + , testProperty "handleSubmittedTxs bumps advertiser generations" prop_handleSubmittedTxs_bumpsAdvertisers , testCaseSteps "advertisingPeersForTxExcept scans the authoritative peer key sets" unit_advertisingPeersForTxExcept_scansAuthoritativePeerSets , testCaseSteps "removeAdvertisingPeersForResolvedTx clears all advertising peers for a resolved key" unit_removeAdvertisingPeersForResolvedTx_clearsAllAdvertisingPeers , testCaseSteps "updatePeerPhase only wakes the peer becoming idle" unit_updatePeerPhase_wakesOnlyBecomingIdlePeer @@ -746,11 +747,8 @@ prop_handleReceivedTxIds case otherPeerOpt of Just op | not (null mempoolResolveActiveGroup) -> let original = lookupPeerOrFail op sharedStateWithRetained - post = lookupPeerOrFail op 
sharedState' - bumpIfIdle g - | sharedPeerPhase original == PeerIdle = g + 1 - | otherwise = g - in conjoin + post = lookupPeerOrFail op sharedState' in + conjoin [ counterexample "other peer's advertised keys not restored" (sharedPeerAdvertisedTxKeys post === sharedPeerAdvertisedTxKeys original) @@ -758,7 +756,7 @@ prop_handleReceivedTxIds (sharedPeerPhase post === sharedPeerPhase original) , counterexample "other peer's generation bump mismatch" (sharedPeerGeneration post - === bumpIfIdle (sharedPeerGeneration original)) + === sharedPeerGeneration original + 1) ] _ -> property True @@ -1214,17 +1212,14 @@ prop_handleReceivedTxs ] -- otherPeer is added to wakePeers when any surviving omitted entry - -- exists; its generation is bumped iff it was PeerIdle. LateMempool - -- is set up single-advertiser so it does not wake otherPeer here. + -- exists; its generation is bumped unconditionally. LateMempool is + -- set up single-advertiser so it does not wake otherPeer here. checkOtherPeerState = case otherPeerOpt of Just op | not (IntSet.null omittedSurvivingKeys) -> let original = lookupPeerOrFail op sharedStateBase - post = lookupPeerOrFail op sharedState' - bumpIfIdle g - | sharedPeerPhase original == PeerIdle = g + 1 - | otherwise = g - in conjoin + post = lookupPeerOrFail op sharedState' in + conjoin [ counterexample "other peer's advertised keys changed" (sharedPeerAdvertisedTxKeys post === sharedPeerAdvertisedTxKeys original) @@ -1232,7 +1227,7 @@ prop_handleReceivedTxs (sharedPeerPhase post === sharedPeerPhase original) , counterexample "other peer's generation bump mismatch" (sharedPeerGeneration post - === bumpIfIdle (sharedPeerGeneration original)) + === sharedPeerGeneration original + 1) ] _ -> property True @@ -1520,6 +1515,80 @@ unit_nextPeerAction_claimsAtScoreDelayThreshold step = do (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 +-- | A peer whose submission attempt has been cleared 
(e.g. after a +-- mempool rejection) must not prevent another advertiser from claiming +-- the same tx. After one peer's attempt is cleared and its lease +-- released back to 'TxClaimable', any other peer that still advertises +-- the tx should be able to claim it on its next 'nextPeerAction' pass. +-- +-- Exercises the cross-peer retry invariant in the 'txSelectable' / +-- 'nextPeerAction' path: once no peer has an outstanding attempt on a +-- tx and its lease is claimable, a still-advertising peer is eligible +-- to re-claim. +prop_nextPeerAction_claimsRejectedTxFromOtherAdvertiser :: Property +prop_nextPeerAction_claimsRejectedTxFromOtherAdvertiser = + counterexample "peerB should be able to claim the released tx" + $ case action of + PeerRequestTxs keys -> + counterexample ("keys requested: " ++ show keys) + $ TxKey txKeyInt `elem` keys + _ -> + counterexample ("peerB action: " ++ show action) False + where + peerA, peerB :: PeerAddr + peerA = 1 + peerB = 2 + + txid :: TxId + txid = 4 + + txKeyInt :: Int + txKeyInt = 0 + + txSize :: SizeInBytes + txSize = 100 + + txBody :: Tx TxId + txBody = mkTx txid txSize + + sharedState0 :: SharedTxState PeerAddr TxId + sharedState0 = + ensurePeerAdvertisesTxKeys peerA [TxKey txKeyInt] + $ ensurePeerAdvertisesTxKeys peerB [TxKey txKeyInt] + $ emptySharedTxState + { sharedTxIdToKey = Map.singleton (getRawTxId txid) (TxKey txKeyInt) + , sharedKeyToTxId = IntMap.singleton txKeyInt txid + , sharedNextTxKey = txKeyInt + 1 + , sharedTxTable = IntMap.singleton txKeyInt TxEntry + { txLease = TxLeased peerA (addTime 1 now) + , txAdvertiserCount = 2 + , txAttempts = Map.singleton peerA TxBuffered + } + } + + peerAState :: PeerTxLocalState (Tx TxId) + peerAState = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton (TxKey txKeyInt) + , peerDownloadedTxs = IntMap.singleton txKeyInt txBody + } + + peerBState :: PeerTxLocalState (Tx TxId) + peerBState = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = 
StrictSeq.singleton (TxKey txKeyInt) + , peerAvailableTxIds = IntMap.singleton txKeyInt txSize + } + + (_peerAStateAfter, sharedStateAfterRejection) = + handleSubmittedTxs now defaultTxDecisionPolicy peerA + [] + [TxKey txKeyInt] + peerAState + sharedState0 + + (action, _peerBStateAfter, _sharedStateFinal) = + nextPeerAction now defaultTxDecisionPolicy peerB peerBState + sharedStateAfterRejection + -- Verifies that nextPeerAction can steal an expired lease for the best idle -- advertiser and request that tx. prop_nextPeerAction_claimsExpiredLease @@ -2324,20 +2393,21 @@ prop_nextPeerAction_returnsPeerGeneration (Positive peeraddr) = peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy } (peerAction, _, _) = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 --- Verifies that handleSubmittedTxs bumps idle advertisers while leaving --- submitting and waiting advertisers unchanged. -prop_handleSubmittedTxs_bumpsIdleAdvertisers +-- Verifies that handleSubmittedTxs bumps the generation of every other +-- advertiser of the resolved tx, regardless of phase, while leaving the +-- submitting peer's own generation unchanged. 
+prop_handleSubmittedTxs_bumpsAdvertisers :: Positive Int -> Positive Int -> Positive Int -> TxId -> Positive Int -> Property -prop_handleSubmittedTxs_bumpsIdleAdvertisers (Positive owner0) (Positive peerA0) (Positive peerB0) txid0 txSize0 = +prop_handleSubmittedTxs_bumpsAdvertisers (Positive owner0) (Positive peerA0) (Positive peerB0) txid0 txSize0 = owner /= peerA && owner /= peerB && peerA /= peerB ==> conjoin [ sharedPeerGeneration (lookupPeerOrFail peerA sharedState') === 1 - , sharedPeerGeneration (lookupPeerOrFail peerB sharedState') === 0 + , sharedPeerGeneration (lookupPeerOrFail peerB sharedState') === 1 , sharedPeerGeneration (lookupPeerOrFail owner sharedState') === 0 ] where From be9afc6423fbe352bcffaa03f1bdb8ca601e0328 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Thu, 23 Apr 2026 16:44:21 +0200 Subject: [PATCH 41/67] fixup: fix tx submission order When picking TXs to submit to the mempool stop at gaps. When picking TXs to download use the peer's advertisement order as a guide, not the TxKey order. --- .../Network/TxSubmission/Inbound/V2/State.hs | 55 +++++++++++++------ .../Ouroboros/Network/TxSubmission/TxLogic.hs | 38 +++++++++---- 2 files changed, 65 insertions(+), 28 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index dd701e3e402..e703941f3b9 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -277,18 +277,33 @@ pickSubmitAction PeerActionContext { pacPeerAddr, pacPeerState, pacSharedState } else Just txsToSubmit where - -- Filters the unacknowledged txid queue for bodies buffered by this peer - -- that are not currently being submitted by another advertiser. - -- Returns the list of tx keys ready for immediate submission in the order they - -- were originally advertised by the peer. 
- pickBufferedTxsToSubmit = - [ txKey - | txKey@(TxKey k) <- toList (peerUnacknowledgedTxIds pacPeerState) - , IntMap.member k (peerDownloadedTxs pacPeerState) - , Just txEntry <- [IntMap.lookup k (sharedTxTable pacSharedState)] - , txBufferedByPeer pacPeerAddr txEntry - , not (txSubmittingByOther pacPeerAddr txEntry) - ] + -- Walk the unacknowledged txid queue in peer advertisement order, picking + -- bodies buffered by this peer for immediate submission. Stop at the + -- first tx that is unresolved and not available from this peer: later + -- txs in the peer's stream must not run ahead of earlier ones, otherwise + -- a tx may be offered to the mempool before a transaction it depends on + -- is confirmed. Txs already resolved elsewhere (present in + -- 'sharedRetainedTxs') are skipped over since no further action is + -- needed for them. + pickBufferedTxsToSubmit = go [] (toList (peerUnacknowledgedTxIds pacPeerState)) + where + go acc [] = reverse acc + go acc (txKey@(TxKey k) : rest) = + case IntMap.lookup k (sharedTxTable pacSharedState) of + Just txEntry + | IntMap.member k (peerDownloadedTxs pacPeerState) + , txBufferedByPeer pacPeerAddr txEntry + , not (txSubmittingByOther pacPeerAddr txEntry) -> + go (txKey : acc) rest + _ | retainedMember k (sharedRetainedTxs pacSharedState) -> + -- already resolved via another peer + go acc rest + _ | not (IntMap.member k (peerAvailableTxIds pacPeerState)) + , not (IntMap.member k (peerDownloadedTxs pacPeerState)) -> + -- we have already finished with this tx (previously + -- submitted, or never had a body to submit) + go acc rest + _ -> reverse acc -- | Select transactions to request from the peer, if within policy limits. -- @@ -325,18 +340,22 @@ pickRequestTxsAction ctx@PeerActionContext { pacNow, pacPolicy, pacPeerState, pa leaseUntil = addTime (interTxSpace pacPolicy) pacNow - -- We pick which TXs to download based on TxKey in ascending order. 
- -- This makes it likely (but not guaranteed) that we end up downloading - -- TXs in the order the peer presented them to us. + -- Iterate the peer's unacknowledged queue, which preserves the peer's + -- advertisement order. Peers are expected to advertise in chain- + -- topological order (parents before children), so walking in that + -- order aligns fetch order with submission-validity order and a + -- child is never requested ahead of its parent when the same peer + -- carries both. candidates = [ (k, txSize) - | (k, txSize) <- IntMap.toAscList (peerAvailableTxIds pacPeerState) + | TxKey k <- toList (peerUnacknowledgedTxIds pacPeerState) , IntSet.notMember k (peerRequestedTxs pacPeerState) , IntMap.notMember k (peerDownloadedTxs pacPeerState) + , Just txSize <- [IntMap.lookup k (peerAvailableTxIds pacPeerState)] ] - -- Select transactions to request by iterating through candidates in ascending - -- key order until the size budget is consumed. + -- Select transactions to request by iterating through candidates in + -- peer advertisement order until the size budget is consumed. go selectedRev selectedSize txTable [] = (reverse selectedRev, selectedSize, txTable) go selectedRev selectedSize txTable ((k, txSize) : rest) = if exceedsBudget selectedSize txSize diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 1b18ab469d4..8eb80bd04cb 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -1438,6 +1438,7 @@ prop_nextPeerAction_claimsClaimableTx -> Property prop_nextPeerAction_claimsClaimableTx (Positive peerA0) (Positive peerB0) (Positive peerC0) txid0 txSize0 = distinctPeers ==> + peerTxLocalStateInvariant peerState0 .&&. 
case peerAction of PeerRequestTxs txKeys -> conjoin @@ -1474,8 +1475,11 @@ prop_nextPeerAction_claimsClaimableTx (Positive peerA0) (Positive peerB0) (Posit , txAttempts = Map.empty } } - peerState0 = emptyPeerTxLocalState { peerAvailableTxIds = IntMap.singleton k txSize - , peerScore = peerAScore } + peerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton key + , peerAvailableTxIds = IntMap.singleton k txSize + , peerScore = peerAScore + } (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peerA peerState0 sharedState0 unit_nextPeerAction_claimsAtScoreDelayThreshold :: (String -> IO ()) -> Assertion @@ -1510,8 +1514,11 @@ unit_nextPeerAction_claimsAtScoreDelayThreshold step = do , txAttempts = Map.empty } } - peerState0 = emptyPeerTxLocalState { peerAvailableTxIds = IntMap.singleton k txSize - , peerScore = PeerScore 20 now } + peerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton key + , peerAvailableTxIds = IntMap.singleton k txSize + , peerScore = PeerScore 20 now + } (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 @@ -1600,6 +1607,7 @@ prop_nextPeerAction_claimsExpiredLease -> Property prop_nextPeerAction_claimsExpiredLease (Positive oldOwner0) (Positive peerA0) (Positive peerB0) txid0 txSize0 = distinctPeers ==> + peerTxLocalStateInvariant peerState0 .&&. 
case peerAction of PeerRequestTxs txKeys -> conjoin @@ -1636,8 +1644,11 @@ prop_nextPeerAction_claimsExpiredLease (Positive oldOwner0) (Positive peerA0) (P , txAttempts = Map.empty } } - peerState0 = emptyPeerTxLocalState { peerAvailableTxIds = IntMap.singleton k txSize - , peerScore = peerAScore } + peerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton key + , peerAvailableTxIds = IntMap.singleton k txSize + , peerScore = peerAScore + } (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peerA peerState0 sharedState0 -- Verifies that nextPeerAction still requests an oversized first tx when it @@ -1648,6 +1659,7 @@ prop_nextPeerAction_requestsOversizedFirstTx -> Positive Int -> Property prop_nextPeerAction_requestsOversizedFirstTx (Positive peeraddr) txid0 (Positive txSize0) = + peerTxLocalStateInvariant peerState0 .&&. case peerAction of PeerRequestTxs [txKey] -> conjoin @@ -1678,7 +1690,8 @@ prop_nextPeerAction_requestsOversizedFirstTx (Positive peeraddr) txid0 (Positive , sharedTxTable = IntMap.singleton k (mkTxEntry peeraddr txSize Nothing) } peerState0 = emptyPeerTxLocalState - { peerAvailableTxIds = IntMap.singleton k txSize + { peerUnacknowledgedTxIds = StrictSeq.singleton key + , peerAvailableTxIds = IntMap.singleton k txSize , peerRequestedTxIds = maxNumTxIdsToRequest policy } (peerAction, peerState', sharedState') = nextPeerAction now policy peeraddr peerState0 sharedState0 @@ -1707,7 +1720,8 @@ unit_nextPeerAction_skipsBlockedAvailableTxs step = do kBlocked = 1 kClaimable = 2 peerState = emptyPeerTxLocalState - { peerAvailableTxIds = IntMap.fromList [(kBlocked, 10), (kClaimable, 11)] + { peerUnacknowledgedTxIds = StrictSeq.fromList [blockedKey, claimableKey] + , peerAvailableTxIds = IntMap.fromList [(kBlocked, 10), (kClaimable, 11)] } sharedState :: SharedTxState PeerAddr TxId sharedState = emptySharedTxState @@ -2117,6 +2131,7 @@ prop_nextPeerActionPipelined_secondBodyBatch -> Property 
prop_nextPeerActionPipelined_secondBodyBatch (Positive peeraddr) txidA0 txidB0 txSizeA0 txSizeB0 = txidA /= txidB ==> + peerTxLocalStateInvariant peerState0 .&&. case peerAction of PeerRequestTxs [txKey] -> conjoin @@ -2141,7 +2156,8 @@ prop_nextPeerActionPipelined_secondBodyBatch (Positive peeraddr) txidA0 txidB0 t kA = unTxKey keyA kB = unTxKey keyB peerState0 = emptyPeerTxLocalState - { peerAvailableTxIds = IntMap.singleton kB txSizeB + { peerUnacknowledgedTxIds = StrictSeq.fromList [keyA, keyB] + , peerAvailableTxIds = IntMap.fromList [(kA, txSizeA), (kB, txSizeB)] , peerRequestedTxs = IntSet.singleton kA , peerRequestedTxBatches = StrictSeq.singleton (mkRequestedTxBatch [keyA] txSizeA) , peerRequestedTxsSize = txSizeA @@ -2177,6 +2193,7 @@ prop_nextPeerActionPipelined_noThirdBodyBatch -> Property prop_nextPeerActionPipelined_noThirdBodyBatch (Positive peeraddr) txidA0 txidB0 txidC0 txSizeA0 txSizeB0 txSizeC0 = distinctTxIds ==> + peerTxLocalStateInvariant peerState0 .&&. case peerAction of PeerDoNothing _ _ -> conjoin @@ -2201,7 +2218,8 @@ prop_nextPeerActionPipelined_noThirdBodyBatch (Positive peeraddr) txidA0 txidB0 kB = unTxKey keyB kC = unTxKey keyC peerState0 = emptyPeerTxLocalState - { peerAvailableTxIds = IntMap.singleton kC txSizeC + { peerUnacknowledgedTxIds = StrictSeq.fromList [keyA, keyB, keyC] + , peerAvailableTxIds = IntMap.fromList [(kA, txSizeA), (kB, txSizeB), (kC, txSizeC)] , peerRequestedTxs = IntSet.fromList [kA, kB] , peerRequestedTxBatches = StrictSeq.fromList [ mkRequestedTxBatch [keyA] txSizeA From cff5d5d116ca50a72bc2c7d4fbaf0ec6a3cb8f00 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Mon, 27 Apr 2026 10:08:33 +0200 Subject: [PATCH 42/67] fixup: improve score testing Test the peer score functionality. Use arbitrary TxDecisionPolicy instead of the defaultTxDecisionPolicy. 
--- .../Network/TxSubmission/Inbound/V2/Policy.hs | 2 +- .../Network/TxSubmission/Inbound/V2/State.hs | 1 + .../Ouroboros/Network/TxSubmission/TxLogic.hs | 318 +++++++++++------- 3 files changed, 201 insertions(+), 120 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs index 24bbf00a529..ac262ac97fb 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs @@ -64,7 +64,7 @@ data TxDecisionPolicy = TxDecisionPolicy { interTxSpace :: !DiffTime -- ^ space between actual requests for the same TX. } - deriving Show + deriving (Eq, Show) instance NFData TxDecisionPolicy where rnf TxDecisionPolicy{} = () diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index e703941f3b9..bc3085db0b4 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -12,6 +12,7 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.State , advertisingPeersForTxKeysExcept , advertisingPeersForTxExcept , removeAdvertisingPeersForResolvedTx + , currentPeerScore , drainPeerScore , applyPeerRejections , sweepSharedState diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 8eb80bd04cb..f6417fc7c02 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -171,15 +171,18 @@ data InvariantStrength = WeakInvariant -- and @peerRequestedTxsSize@ equals the sum of all batch sizes. peerTxLocalStateInvariant :: forall tx. 
- PeerTxLocalState tx + TxDecisionPolicy + -> PeerTxLocalState tx -> Property -peerTxLocalStateInvariant PeerTxLocalState { +peerTxLocalStateInvariant TxDecisionPolicy { scoreMax } + PeerTxLocalState { peerUnacknowledgedTxIds, peerAvailableTxIds, peerRequestedTxs, peerRequestedTxBatches, peerRequestedTxsSize, - peerDownloadedTxs + peerDownloadedTxs, + peerScore } = conjoin [ counterexample "requested keys are not all available" @@ -196,8 +199,14 @@ peerTxLocalStateInvariant PeerTxLocalState { (peerRequestedTxs === batchKeyUnion) , counterexample "peerRequestedTxsSize does not match the sum of batch sizes" (peerRequestedTxsSize === batchSizeSum) + , counterexample ("peerScoreValue is negative: " ++ show scoreVal) + (property (scoreVal >= 0)) + , counterexample ("peerScoreValue exceeds scoreMax: " + ++ show scoreVal ++ " > " ++ show scoreMax) + (property (scoreVal <= scoreMax)) ] where + scoreVal = peerScoreValue peerScore unackKeys = IntSet.fromList [ k | TxKey k <- toList peerUnacknowledgedTxIds ] availableKeys = IntMap.keysSet peerAvailableTxIds downloadedKeys = IntMap.keysSet peerDownloadedTxs @@ -225,14 +234,15 @@ combinedStateInvariant , Show peeraddr , Show txid ) - => InvariantStrength + => TxDecisionPolicy + -> InvariantStrength -> peeraddr -> PeerTxLocalState tx -> SharedTxState peeraddr txid -> Property -combinedStateInvariant strength peeraddr peerState sharedState = +combinedStateInvariant policy strength peeraddr peerState sharedState = conjoin - [ peerTxLocalStateInvariant peerState + [ peerTxLocalStateInvariant policy peerState , sharedTxStateInvariant strength sharedState , counterexample "advertised keys escape the peer's unacknowledged queue" (property (advertisedKeys `IntSet.isSubsetOf` unackKeys)) @@ -439,33 +449,52 @@ rfIsPruned _ = False instance Arbitrary ArbTxDecisionPolicy where arbitrary = - ArbTxDecisionPolicy <$> ( - TxDecisionPolicy . getSmall . getPositive - <$> arbitrary - <*> (getSmall . getPositive <$> arbitrary) - <*> (SizeInBytes . 
getPositive <$> arbitrary) - <*> choose (1, 10) - <*> (getSmall . getPositive <$> arbitrary) - <*> (realToFrac <$> choose (0 :: Double, 2)) - <*> choose (0, 1) - <*> choose (0, 1800) - <*> (realToFrac <$> choose (0 :: Double, 1))) - - shrink (ArbTxDecisionPolicy a@TxDecisionPolicy { - maxNumTxIdsToRequest, - txsSizeInflightPerPeer, - txInflightMultiplicity }) = - [ ArbTxDecisionPolicy a { maxNumTxIdsToRequest = NumTxIdsToReq x } - | (Positive (Small x)) <- shrink (Positive (Small (getNumTxIdsToReq maxNumTxIdsToRequest))) - ] - ++ - [ ArbTxDecisionPolicy a { txsSizeInflightPerPeer = SizeInBytes s } - | Positive s <- shrink (Positive (getSizeInBytes txsSizeInflightPerPeer)) - ] - ++ - [ ArbTxDecisionPolicy a { txInflightMultiplicity = x } - | Positive (Small x) <- shrink (Positive (Small txInflightMultiplicity)) - ] + frequency + [ (1, pure (ArbTxDecisionPolicy defaultTxDecisionPolicy)) + , (9, ArbTxDecisionPolicy <$> ( + TxDecisionPolicy . getSmall . getPositive + <$> arbitrary + <*> (getSmall . getPositive <$> arbitrary) + <*> (SizeInBytes . getPositive <$> arbitrary) + <*> choose (1, 10) + <*> (getSmall . 
getPositive <$> arbitrary) + <*> (realToFrac <$> choose (0 :: Double, 2)) + <*> choose (0, 1) + <*> choose (0, 1800) + <*> (realToFrac <$> choose (0 :: Double, 1)))) + ] + + shrink (ArbTxDecisionPolicy a) + | a == defaultTxDecisionPolicy = [] + | otherwise = + ArbTxDecisionPolicy defaultTxDecisionPolicy + : [ ArbTxDecisionPolicy a { maxNumTxIdsToRequest = NumTxIdsToReq x } + | (Positive (Small x)) <- shrink (Positive (Small (getNumTxIdsToReq (maxNumTxIdsToRequest a)))) + ] + ++ [ ArbTxDecisionPolicy a { maxUnacknowledgedTxIds = x } + | (Positive (Small x)) <- shrink (Positive (Small (maxUnacknowledgedTxIds a))) + ] + ++ [ ArbTxDecisionPolicy a { txsSizeInflightPerPeer = SizeInBytes s } + | Positive s <- shrink (Positive (getSizeInBytes (txsSizeInflightPerPeer a))) + ] + ++ [ ArbTxDecisionPolicy a { maxOutstandingTxBatchesPerPeer = x } + | x <- shrink (maxOutstandingTxBatchesPerPeer a), x >= 1 + ] + ++ [ ArbTxDecisionPolicy a { txInflightMultiplicity = x } + | Positive (Small x) <- shrink (Positive (Small (txInflightMultiplicity a))) + ] + ++ [ ArbTxDecisionPolicy a { bufferedTxsMinLifetime = realToFrac x } + | NonNegative x <- shrink (NonNegative (realToFrac (bufferedTxsMinLifetime a) :: Double)) + ] + ++ [ ArbTxDecisionPolicy a { scoreRate = x } + | NonNegative x <- shrink (NonNegative (scoreRate a)) + ] + ++ [ ArbTxDecisionPolicy a { scoreMax = x } + | NonNegative x <- shrink (NonNegative (scoreMax a)) + ] + ++ [ ArbTxDecisionPolicy a { interTxSpace = realToFrac x } + | NonNegative x <- shrink (NonNegative (realToFrac (interTxSpace a) :: Double)) + ] instance Arbitrary ArbSharedPeerState where arbitrary = ArbSharedPeerState <$> genSharedPeerState @@ -518,12 +547,14 @@ instance Arbitrary ArbSharedTxState where -- Also asserts the peer's pre-existing state and unrelated shared state are -- preserved, and that the combined invariant holds before and after. 
prop_handleReceivedTxIds - :: ArbSharedTxState + :: ArbTxDecisionPolicy + -> ArbSharedTxState -> ArbPeerTxLocalState -> NonEmptyList (TxId, Positive Int, TxIdGroupTag) -> Positive Int -> Property prop_handleReceivedTxIds + (ArbTxDecisionPolicy policy) (ArbSharedTxState sharedState0) (ArbPeerTxLocalState peerStateGenerated) (NonEmpty taggedInput) @@ -559,7 +590,7 @@ prop_handleReceivedTxIds -- Seed the retained group into the shared state first: intern the txids -- and add them to sharedRetainedTxs. - sharedStateWithRetained = seedRetainedTxids retainedGroup sharedStateWithPeer + sharedStateWithRetained = seedRetainedTxids policy retainedGroup sharedStateWithPeer -- Pick an advertiser peer for the resolve-active sub-group, if any peer -- other than @peeraddr@ exists. If none is available, demote the @@ -627,10 +658,10 @@ prop_handleReceivedTxIds } oldPeerAvailableKeys = IntMap.keysSet (peerAvailableTxIds peerState0) (peerState', sharedState') = - handleReceivedTxIds mempoolHasTx now defaultTxDecisionPolicy peeraddr requestedToReply txidsAndSizes peerState0 sharedStateBase + handleReceivedTxIds mempoolHasTx now policy peeraddr requestedToReply txidsAndSizes peerState0 sharedStateBase expectedRetainUntil = - addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now + addTime (bufferedTxsMinLifetime policy) now -- Only the new group extends the peer's advertised-key set. 
expectedAdvertisedKeys = @@ -818,9 +849,9 @@ prop_handleReceivedTxIds , conjoin (fmap checkMempoolResolveActiveEntry mempoolResolveActiveGroup) , checkOtherPeerState , counterexample "combined invariant violated before the call" - (combinedStateInvariant StrongInvariant peeraddr peerState0 sharedStateBase) + (combinedStateInvariant policy StrongInvariant peeraddr peerState0 sharedStateBase) , counterexample "combined invariant violated after the call" - (combinedStateInvariant StrongInvariant peeraddr peerState' sharedState') + (combinedStateInvariant policy StrongInvariant peeraddr peerState' sharedState') , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) , checkNoThunks "sharedState'" sharedState' ] @@ -942,14 +973,18 @@ unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull step = do -- match the group sizes, peer-local fields handleReceivedTxs does not touch -- are preserved, and the combined invariant holds before and after. prop_handleReceivedTxs - :: ArbSharedTxState + :: ArbTxDecisionPolicy + -> ArbSharedTxState -> ArbPeerTxLocalState -> NonEmptyList (TxId, Positive Int, RequestedFate) + -> NonNegative Double -> Property prop_handleReceivedTxs + (ArbTxDecisionPolicy policy) (ArbSharedTxState sharedState0) (ArbPeerTxLocalState peerStateGenerated) - (NonEmpty requestedInput) = + (NonEmpty requestedInput) + (NonNegative initialScore) = forAll (genPeerAddrBiased sharedState0) $ \peeraddr -> let sharedStateWithPeer = ensurePeerAdvertisesTxKeys peeraddr [] sharedState0 @@ -999,7 +1034,7 @@ prop_handleReceivedTxs -- deliberately not seeded: their keys live only in the peer's local -- bookkeeping. 
sharedStateWithLateRetained = - seedRetainedTxids (fmap fst lateRetainedGroup) sharedStateWithPeer + seedRetainedTxids policy (fmap fst lateRetainedGroup) sharedStateWithPeer sharedStateBase = seedRequestedActiveTxids peeraddr otherPeerOpt activeSeedTagged sharedStateWithLateRetained @@ -1094,15 +1129,22 @@ prop_handleReceivedTxs peerAvailableTxIds = requestedAvailableMap, peerRequestedTxs = requestedKeysSet, peerRequestedTxBatches = StrictSeq.singleton requestedBatch, - peerRequestedTxsSize = requestedTotalSize + peerRequestedTxsSize = requestedTotalSize, + peerScore = PeerScore (min initialScore (scoreMax policy)) + (Time 0) } (omittedCount, lateCount, peerState', sharedState') = - handleReceivedTxs mempoolHasTxFn now defaultTxDecisionPolicy peeraddr + handleReceivedTxs mempoolHasTxFn now policy peeraddr receivedBodies peerState0 sharedStateBase + penaltyCount = omittedCount + lateCount + peerState'' | penaltyCount == 0 = peerState' + | otherwise = + snd (applyPeerRejections policy now penaltyCount peerState') + expectedRetainUntil = - addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now + addTime (bufferedTxsMinLifetime policy) now -- Per-fate assertions. 
checkBufferedEntry ((txid, size), _) = @@ -1323,9 +1365,24 @@ prop_handleReceivedTxs , conjoin (fmap checkPrunedEntry prunedAllocations) , checkOtherPeerState , counterexample "combined invariant violated before the call" - (combinedStateInvariant StrongInvariant peeraddr peerState0 sharedStateBase) + (combinedStateInvariant policy StrongInvariant peeraddr peerState0 sharedStateBase) , counterexample "combined invariant violated after the call" - (combinedStateInvariant StrongInvariant peeraddr peerState' sharedState') + (combinedStateInvariant policy StrongInvariant peeraddr peerState' sharedState') + , counterexample "score path: peerScoreValue not as expected" + (if penaltyCount == 0 + then peerScoreValue (peerScore peerState'') + === peerScoreValue (peerScore peerState0) + else peerScoreValue (peerScore peerState'') + === min (scoreMax policy) + (currentPeerScore policy now (peerScore peerState0) + + fromIntegral penaltyCount)) + , counterexample "score path: peerScoreTs not advanced" + (if penaltyCount == 0 + then peerScoreTs (peerScore peerState'') + === peerScoreTs (peerScore peerState0) + else peerScoreTs (peerScore peerState'') === now) + , counterexample "combined invariant violated after score update" + (combinedStateInvariant policy StrongInvariant peeraddr peerState'' sharedState') , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) , checkNoThunks "sharedState'" sharedState' ] @@ -1333,13 +1390,14 @@ prop_handleReceivedTxs -- Verifies that handleSubmittedTxs retains accepted txs and removes rejected -- txs from the active table and tx-key maps. 
prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> TxId -> TxId -> Positive Int -> Positive Int -> Property -prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected (Positive peeraddr) txidA0 txidB0 txSizeA0 txSizeB0 = +prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected (ArbTxDecisionPolicy policy) (Positive peeraddr) txidA0 txidB0 txSizeA0 txSizeB0 = txidA /= txidB ==> conjoin [ peerDownloadedTxs peerState' === IntMap.empty @@ -1378,17 +1436,18 @@ prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected (Positive peeraddr) txid kA = unTxKey keyA kB = unTxKey keyB peerState0 = emptyPeerTxLocalState { peerDownloadedTxs = IntMap.fromList [(kA, txA), (kB, txB)] } - expectedRetainUntil = addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now - (peerState', sharedState') = handleSubmittedTxs now defaultTxDecisionPolicy peeraddr [keyA] [keyB] peerState0 sharedState0 + expectedRetainUntil = addTime (bufferedTxsMinLifetime policy) now + (peerState', sharedState') = handleSubmittedTxs now policy peeraddr [keyA] [keyB] peerState0 sharedState0 -- Verifies that nextPeerAction submits buffered txs owned by the peer before -- taking any other action. 
prop_nextPeerAction_prioritisesSubmit - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> TxId -> Positive Int -> Property -prop_nextPeerAction_prioritisesSubmit (Positive peeraddr) txid0 txSize0 = +prop_nextPeerAction_prioritisesSubmit (ArbTxDecisionPolicy policy) (Positive peeraddr) txid0 txSize0 = case peerAction of PeerSubmitTxs [txKey] -> conjoin @@ -1422,37 +1481,38 @@ prop_nextPeerAction_prioritisesSubmit (Positive peeraddr) txid0 txSize0 = } peerState0 = emptyPeerTxLocalState { peerUnacknowledgedTxIds = StrictSeq.singleton key - , peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy + , peerRequestedTxIds = maxNumTxIdsToRequest policy , peerDownloadedTxs = IntMap.singleton k tx } - (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + (peerAction, peerState', sharedState') = nextPeerAction now policy peeraddr peerState0 sharedState0 -- Verifies that nextPeerAction leases a claimable tx to the best idle -- advertiser and requests its body. prop_nextPeerAction_claimsClaimableTx - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> Positive Int -> Positive Int -> TxId -> Positive Int -> Property -prop_nextPeerAction_claimsClaimableTx (Positive peerA0) (Positive peerB0) (Positive peerC0) txid0 txSize0 = +prop_nextPeerAction_claimsClaimableTx (ArbTxDecisionPolicy policy) (Positive peerA0) (Positive peerB0) (Positive peerC0) txid0 txSize0 = distinctPeers ==> - peerTxLocalStateInvariant peerState0 .&&. + peerTxLocalStateInvariant policy peerState0 .&&. 
case peerAction of PeerRequestTxs txKeys -> conjoin [ txKeys === [key] , peerRequestedTxs peerState' === IntSet.singleton k , txLease (lookupEntryOrFail key sharedState') === - TxLeased peerA (addTime (interTxSpace defaultTxDecisionPolicy) now) + TxLeased peerA (addTime (interTxSpace policy) now) , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) , checkNoThunks "sharedState'" sharedState' ] _ -> counterexample ("unexpected peer action: " ++ show peerAction) False where peerA = peerA0 - peerAScore = PeerScore 1 now + peerAScore = PeerScore (min 1 (scoreMax policy)) now peerB = peerB0 + 1000 peerC = peerC0 + 2000 distinctPeers = peerA /= peerB && peerA /= peerC && peerB /= peerC @@ -1480,7 +1540,7 @@ prop_nextPeerAction_claimsClaimableTx (Positive peerA0) (Positive peerB0) (Posit , peerAvailableTxIds = IntMap.singleton k txSize , peerScore = peerAScore } - (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peerA peerState0 sharedState0 + (peerAction, peerState', sharedState') = nextPeerAction now policy peerA peerState0 sharedState0 unit_nextPeerAction_claimsAtScoreDelayThreshold :: (String -> IO ()) -> Assertion unit_nextPeerAction_claimsAtScoreDelayThreshold step = do @@ -1599,22 +1659,23 @@ prop_nextPeerAction_claimsRejectedTxFromOtherAdvertiser = -- Verifies that nextPeerAction can steal an expired lease for the best idle -- advertiser and request that tx. prop_nextPeerAction_claimsExpiredLease - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> Positive Int -> Positive Int -> TxId -> Positive Int -> Property -prop_nextPeerAction_claimsExpiredLease (Positive oldOwner0) (Positive peerA0) (Positive peerB0) txid0 txSize0 = +prop_nextPeerAction_claimsExpiredLease (ArbTxDecisionPolicy policy) (Positive oldOwner0) (Positive peerA0) (Positive peerB0) txid0 txSize0 = distinctPeers ==> - peerTxLocalStateInvariant peerState0 .&&. + peerTxLocalStateInvariant policy peerState0 .&&. 
case peerAction of PeerRequestTxs txKeys -> conjoin [ txKeys === [key] , peerRequestedTxs peerState' === IntSet.singleton k , txLease (lookupEntryOrFail key sharedState') === - TxLeased peerA (addTime (interTxSpace defaultTxDecisionPolicy) now) + TxLeased peerA (addTime (interTxSpace policy) now) , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) , checkNoThunks "sharedState'" sharedState' ] @@ -1622,7 +1683,7 @@ prop_nextPeerAction_claimsExpiredLease (Positive oldOwner0) (Positive peerA0) (P where oldOwner = oldOwner0 peerA = peerA0 + 1000 - peerAScore = PeerScore 1 now + peerAScore = PeerScore (min 1 (scoreMax policy)) now peerB = peerB0 + 2000 distinctPeers = oldOwner /= peerA && oldOwner /= peerB && peerA /= peerB txid = abs txid0 + 1 @@ -1649,17 +1710,18 @@ prop_nextPeerAction_claimsExpiredLease (Positive oldOwner0) (Positive peerA0) (P , peerAvailableTxIds = IntMap.singleton k txSize , peerScore = peerAScore } - (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peerA peerState0 sharedState0 + (peerAction, peerState', sharedState') = nextPeerAction now policy peerA peerState0 sharedState0 -- Verifies that nextPeerAction still requests an oversized first tx when it -- is the only available choice within the soft-budget policy. prop_nextPeerAction_requestsOversizedFirstTx - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> TxId -> Positive Int -> Property -prop_nextPeerAction_requestsOversizedFirstTx (Positive peeraddr) txid0 (Positive txSize0) = - peerTxLocalStateInvariant peerState0 .&&. +prop_nextPeerAction_requestsOversizedFirstTx (ArbTxDecisionPolicy basePolicy) (Positive peeraddr) txid0 (Positive txSize0) = + peerTxLocalStateInvariant policy peerState0 .&&. 
case peerAction of PeerRequestTxs [txKey] -> conjoin @@ -1677,7 +1739,7 @@ prop_nextPeerAction_requestsOversizedFirstTx (Positive peeraddr) txid0 (Positive txSize = mkSize (Positive (txSize0 + 1)) key = TxKey 0 k = unTxKey key - policy = defaultTxDecisionPolicy + policy = basePolicy { txsSizeInflightPerPeer = txSize - 1 , maxOutstandingTxBatchesPerPeer = 1 } @@ -1750,11 +1812,12 @@ unit_nextPeerAction_skipsBlockedAvailableTxs step = do -- Verifies that nextPeerAction submits buffered owned txs before -- acknowledging their txids. prop_nextPeerAction_ownerSubmitsBuffered - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> TxId -> Positive Int -> Property -prop_nextPeerAction_ownerSubmitsBuffered (Positive peeraddr) txid0 txSize0 = +prop_nextPeerAction_ownerSubmitsBuffered (ArbTxDecisionPolicy policy) (Positive peeraddr) txid0 txSize0 = case peerAction of PeerSubmitTxs [txKey] -> conjoin @@ -1786,10 +1849,10 @@ prop_nextPeerAction_ownerSubmitsBuffered (Positive peeraddr) txid0 txSize0 = } peerState0 = emptyPeerTxLocalState { peerUnacknowledgedTxIds = StrictSeq.singleton key - , peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy + , peerRequestedTxIds = maxNumTxIdsToRequest policy , peerDownloadedTxs = IntMap.singleton k tx } - (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + (peerAction, peerState', sharedState') = nextPeerAction now policy peeraddr peerState0 sharedState0 -- Verifies that a blocked buffered tx does not prevent the peer from -- requesting a different claimable tx body. @@ -1928,11 +1991,12 @@ unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx step = do -- Verifies that nextPeerAction keeps non-owner txids unacknowledged until -- the tx has resolved out of the active table. 
prop_nextPeerAction_nonOwnerWaitsUntilResolved - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> Positive Int -> TxId -> Property -prop_nextPeerAction_nonOwnerWaitsUntilResolved (Positive owner0) (Positive peeraddr0) txid0 = +prop_nextPeerAction_nonOwnerWaitsUntilResolved (ArbTxDecisionPolicy policy) (Positive owner0) (Positive peeraddr0) txid0 = owner /= peeraddr ==> conjoin [ case unresolvedAction of @@ -1983,7 +2047,7 @@ prop_nextPeerAction_nonOwnerWaitsUntilResolved (Positive owner0) (Positive peera { peerUnacknowledgedTxIds = StrictSeq.singleton key , peerRequestedTxIds = 0 } - (unresolvedAction, unresolvedPeerState', unresolvedSharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 unresolvedSharedState + (unresolvedAction, unresolvedPeerState', unresolvedSharedState') = nextPeerAction now policy peeraddr peerState0 unresolvedSharedState unresolvedExpectations = conjoin [ peerUnacknowledgedTxIds unresolvedPeerState' === peerUnacknowledgedTxIds peerState0 @@ -1992,16 +2056,17 @@ prop_nextPeerAction_nonOwnerWaitsUntilResolved (Positive owner0) (Positive peera , checkNoThunks "unresolvedPeerState'" (unresolvedPeerState' :: PeerTxLocalState (Tx TxId)) , checkNoThunks "unresolvedSharedState'" unresolvedSharedState' ] - (resolvedAction, resolvedPeerState', resolvedSharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 resolvedSharedState + (resolvedAction, resolvedPeerState', resolvedSharedState') = nextPeerAction now policy peeraddr peerState0 resolvedSharedState -- Verifies that nextPeerActionPipelined does nothing when it can only -- acknowledge txids and cannot request new ones in the same step. 
prop_nextPeerActionPipelined_requiresAckAndReq - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> TxId -> Positive Int -> Property -prop_nextPeerActionPipelined_requiresAckAndReq (Positive peeraddr) txid0 _txSize0 = +prop_nextPeerActionPipelined_requiresAckAndReq (ArbTxDecisionPolicy policy) (Positive peeraddr) txid0 _txSize0 = case peerAction of PeerDoNothing _ _ -> conjoin @@ -2017,7 +2082,7 @@ prop_nextPeerActionPipelined_requiresAckAndReq (Positive peeraddr) txid0 _txSize k = unTxKey key peerState0 = emptyPeerTxLocalState { peerUnacknowledgedTxIds = StrictSeq.singleton key - , peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy + , peerRequestedTxIds = maxNumTxIdsToRequest policy } sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) @@ -2026,16 +2091,17 @@ prop_nextPeerActionPipelined_requiresAckAndReq (Positive peeraddr) txid0 _txSize , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 } - (peerAction, peerState', sharedState') = nextPeerActionPipelined now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + (peerAction, peerState', sharedState') = nextPeerActionPipelined now policy peeraddr peerState0 sharedState0 -- Verifies that nextPeerActionPipelined requests txids once it can both -- acknowledge old txids and ask for more. 
prop_nextPeerActionPipelined_requestsTxIds - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> TxId -> Positive Int -> Property -prop_nextPeerActionPipelined_requestsTxIds (Positive peeraddr) txid0 _txSize0 = +prop_nextPeerActionPipelined_requestsTxIds (ArbTxDecisionPolicy policy) (Positive peeraddr) txid0 _txSize0 = case peerAction of PeerRequestTxIds _ txIdsToAcknowledge txIdsToReq -> conjoin @@ -2065,7 +2131,7 @@ prop_nextPeerActionPipelined_requestsTxIds (Positive peeraddr) txid0 _txSize0 = , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 } - (peerAction, peerState', sharedState') = nextPeerActionPipelined now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + (peerAction, peerState', sharedState') = nextPeerActionPipelined now policy peeraddr peerState0 sharedState0 unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply :: (String -> IO ()) @@ -2123,15 +2189,16 @@ unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply step = do -- Verifies that nextPeerActionPipelined opens a second outstanding body -- batch when another downloadable tx is available. prop_nextPeerActionPipelined_secondBodyBatch - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> TxId -> TxId -> Positive Int -> Positive Int -> Property -prop_nextPeerActionPipelined_secondBodyBatch (Positive peeraddr) txidA0 txidB0 txSizeA0 txSizeB0 = +prop_nextPeerActionPipelined_secondBodyBatch (ArbTxDecisionPolicy basePolicy) (Positive peeraddr) txidA0 txidB0 txSizeA0 txSizeB0 = txidA /= txidB ==> - peerTxLocalStateInvariant peerState0 .&&. + peerTxLocalStateInvariant policy peerState0 .&&. 
case peerAction of PeerRequestTxs [txKey] -> conjoin @@ -2140,7 +2207,7 @@ prop_nextPeerActionPipelined_secondBodyBatch (Positive peeraddr) txidA0 txidB0 t , StrictSeq.length (peerRequestedTxBatches peerState') === 2 , peerRequestedTxsSize peerState' === txSizeA + txSizeB , fmap txLease (IntMap.lookup kB (sharedTxTable sharedState')) === - Just (TxLeased peeraddr (addTime (interTxSpace defaultTxDecisionPolicy) now)) + Just (TxLeased peeraddr (addTime (interTxSpace policy) now)) , fmap (Map.lookup peeraddr . txAttempts) (IntMap.lookup kB (sharedTxTable sharedState')) === Just (Just TxDownloading) @@ -2178,12 +2245,18 @@ prop_nextPeerActionPipelined_secondBodyBatch (Positive peeraddr) txidA0 txidB0 t , sharedKeyToTxId = IntMap.fromList [(kA, txidA), (kB, txidB)] , sharedNextTxKey = 2 } - (peerAction, peerState', sharedState') = nextPeerActionPipelined now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + policy = basePolicy + { maxOutstandingTxBatchesPerPeer = max 2 (maxOutstandingTxBatchesPerPeer basePolicy) + , txsSizeInflightPerPeer = max (txSizeA + txSizeB) + (txsSizeInflightPerPeer basePolicy) + } + (peerAction, peerState', sharedState') = nextPeerActionPipelined now policy peeraddr peerState0 sharedState0 -- Verifies that nextPeerActionPipelined does not open a third outstanding -- body batch once the per-peer batch limit is reached. prop_nextPeerActionPipelined_noThirdBodyBatch - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> TxId -> TxId -> TxId @@ -2191,9 +2264,9 @@ prop_nextPeerActionPipelined_noThirdBodyBatch -> Positive Int -> Positive Int -> Property -prop_nextPeerActionPipelined_noThirdBodyBatch (Positive peeraddr) txidA0 txidB0 txidC0 txSizeA0 txSizeB0 txSizeC0 = +prop_nextPeerActionPipelined_noThirdBodyBatch (ArbTxDecisionPolicy basePolicy) (Positive peeraddr) txidA0 txidB0 txidC0 txSizeA0 txSizeB0 txSizeC0 = distinctTxIds ==> - peerTxLocalStateInvariant peerState0 .&&. + peerTxLocalStateInvariant policy peerState0 .&&. 
case peerAction of PeerDoNothing _ _ -> conjoin @@ -2245,16 +2318,17 @@ prop_nextPeerActionPipelined_noThirdBodyBatch (Positive peeraddr) txidA0 txidB0 , sharedNextTxKey = 3 } (peerAction, peerState', sharedState') = nextPeerActionPipelined now policy peeraddr peerState0 sharedState0 - policy = defaultTxDecisionPolicy { maxOutstandingTxBatchesPerPeer = 2 } + policy = basePolicy { maxOutstandingTxBatchesPerPeer = 2 } -- Verifies that nextPeerAction prunes expired retained txs and removes their -- tx-key mappings while the peer is idle. prop_nextPeerAction_prunesExpiredRetained - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> TxId -> Positive Int -> Property -prop_nextPeerAction_prunesExpiredRetained (Positive peeraddr) txid0 _txSize0 = +prop_nextPeerAction_prunesExpiredRetained (ArbTxDecisionPolicy policy) (Positive peeraddr) txid0 _txSize0 = case peerAction of PeerDoNothing _ Nothing -> conjoin @@ -2271,7 +2345,7 @@ prop_nextPeerAction_prunesExpiredRetained (Positive peeraddr) txid0 _txSize0 = key = TxKey 0 k = unTxKey key idlePeerState :: PeerTxLocalState (Tx TxId) - idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy } + idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest policy } sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) , sharedRetainedTxs = retainedSingleton k now @@ -2282,16 +2356,17 @@ prop_nextPeerAction_prunesExpiredRetained (Positive peeraddr) txid0 _txSize0 = -- The central counters thread sweeps expired retained entries; emulate -- that by calling the same helper before evaluating the peer decision. 
sweptState = sweepSharedState now sharedState0 - (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr idlePeerState sweptState + (peerAction, peerState', sharedState') = nextPeerAction now policy peeraddr idlePeerState sweptState -- Verifies that nextPeerAction keeps unexpired retained txs and returns the -- wake delay until their expiry. prop_nextPeerAction_keepsRetained - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> TxId -> Positive Int -> Property -prop_nextPeerAction_keepsRetained (Positive peeraddr) txid0 _txSize0 = +prop_nextPeerAction_keepsRetained (ArbTxDecisionPolicy policy) (Positive peeraddr) txid0 _txSize0 = case peerAction of PeerDoNothing _ (Just wakeDelay) -> conjoin @@ -2310,7 +2385,7 @@ prop_nextPeerAction_keepsRetained (Positive peeraddr) txid0 _txSize0 = key = TxKey 0 k = unTxKey key idlePeerState :: PeerTxLocalState (Tx TxId) - idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy } + idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest policy } sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) , sharedRetainedTxs = retainedSingleton k retainUntil @@ -2318,19 +2393,20 @@ prop_nextPeerAction_keepsRetained (Positive peeraddr) txid0 _txSize0 = , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 } - (peerAction, peerState', sharedState') = nextPeerAction now defaultTxDecisionPolicy peeraddr idlePeerState sharedState0 + (peerAction, peerState', sharedState') = nextPeerAction now policy peeraddr idlePeerState sharedState0 -- Verifies that PeerDoNothing waits until the earliest shared expiry, whether -- it comes from a lease or a retained tx. 
prop_nextPeerAction_earliestWakeDelay - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> Positive Int -> TxId -> TxId -> Positive Int -> Positive Int -> Property -prop_nextPeerAction_earliestWakeDelay (Positive peeraddr) (Positive owner0) txidA0 txidB0 _txSizeA0 _txSizeB0 = +prop_nextPeerAction_earliestWakeDelay (ArbTxDecisionPolicy policy) (Positive peeraddr) (Positive owner0) txidA0 txidB0 _txSizeA0 _txSizeB0 = peeraddr /= owner ==> conjoin [ case leaseFirstAction of @@ -2350,7 +2426,7 @@ prop_nextPeerAction_earliestWakeDelay (Positive peeraddr) (Positive owner0) txid retainUntilSoon = addTime 13 now keyA = TxKey 0 keyB = TxKey 1 - idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy } + idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest policy } sharedPeers0 = Map.fromList [ (peeraddr, withAdvertisedTxKeys [keyA] (mkSharedPeerState PeerIdle)) , (owner, withAdvertisedTxKeys [keyA] (mkSharedPeerState PeerWaitingTxs)) @@ -2379,15 +2455,16 @@ prop_nextPeerAction_earliestWakeDelay (Positive peeraddr) (Positive owner0) txid , sharedKeyToTxId = IntMap.fromList [(unTxKey keyA, txidA), (unTxKey keyB, txidB)] , sharedNextTxKey = 2 } - (leaseFirstAction, _, _) = nextPeerAction now defaultTxDecisionPolicy peeraddr idlePeerState leaseFirstState - (retainFirstAction, _, _) = nextPeerAction now defaultTxDecisionPolicy peeraddr idlePeerState retainFirstState + (leaseFirstAction, _, _) = nextPeerAction now policy peeraddr idlePeerState leaseFirstState + (retainFirstAction, _, _) = nextPeerAction now policy peeraddr idlePeerState retainFirstState -- Verifies that PeerDoNothing reports the current generation of the acting -- peer. 
prop_nextPeerAction_returnsPeerGeneration - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> Property -prop_nextPeerAction_returnsPeerGeneration (Positive peeraddr) = +prop_nextPeerAction_returnsPeerGeneration (ArbTxDecisionPolicy policy) (Positive peeraddr) = case peerAction of PeerDoNothing generation Nothing -> generation === expectedGeneration _ -> counterexample ("unexpected peer action: " ++ show peerAction) False @@ -2408,20 +2485,21 @@ prop_nextPeerAction_returnsPeerGeneration (Positive peeraddr) = ) ] } - peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest defaultTxDecisionPolicy } - (peerAction, _, _) = nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 + peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest policy } + (peerAction, _, _) = nextPeerAction now policy peeraddr peerState0 sharedState0 -- Verifies that handleSubmittedTxs bumps the generation of every other -- advertiser of the resolved tx, regardless of phase, while leaving the -- submitting peer's own generation unchanged. 
prop_handleSubmittedTxs_bumpsAdvertisers - :: Positive Int + :: ArbTxDecisionPolicy + -> Positive Int -> Positive Int -> Positive Int -> TxId -> Positive Int -> Property -prop_handleSubmittedTxs_bumpsAdvertisers (Positive owner0) (Positive peerA0) (Positive peerB0) txid0 txSize0 = +prop_handleSubmittedTxs_bumpsAdvertisers (ArbTxDecisionPolicy policy) (Positive owner0) (Positive peerA0) (Positive peerB0) txid0 txSize0 = owner /= peerA && owner /= peerB && peerA /= peerB ==> conjoin [ sharedPeerGeneration (lookupPeerOrFail peerA sharedState') === 1 @@ -2453,7 +2531,7 @@ prop_handleSubmittedTxs_bumpsAdvertisers (Positive owner0) (Positive peerA0) (Po , sharedNextTxKey = 1 } peerState0 = emptyPeerTxLocalState { peerDownloadedTxs = IntMap.singleton k tx } - (_, sharedState') = handleSubmittedTxs now defaultTxDecisionPolicy owner [key] [] peerState0 sharedState0 + (_, sharedState') = handleSubmittedTxs now policy owner [key] [] peerState0 sharedState0 unit_advertisingPeersForTxExcept_scansAuthoritativePeerSets :: (String -> IO ()) -> Assertion unit_advertisingPeersForTxExcept_scansAuthoritativePeerSets step = do @@ -2627,8 +2705,9 @@ genPeerTxLocalState = sized $ \n -> do peerDownloadedTxs <- IntMap.fromList <$> mapM genDownloadedTx downloadedKeys - peerScoreValue <- choose (0 :: Double, 100) peerScoreTs <- genSmallTime + -- Generated peer states default to a zero score. + let peerScoreValue = 0 pure PeerTxLocalState { peerUnacknowledgedTxIds, peerAvailableTxIds, @@ -3163,10 +3242,11 @@ freshBatchAgainstSharedState sharedState = reverse . snd . foldl' step (reserved -- Intern the given txids into the shared state and seed each into -- sharedRetainedTxs. 
seedRetainedTxids - :: [(TxId, SizeInBytes)] + :: TxDecisionPolicy + -> [(TxId, SizeInBytes)] -> SharedTxState PeerAddr TxId -> SharedTxState PeerAddr TxId -seedRetainedTxids entries st0 = +seedRetainedTxids policy entries st0 = stInterned { sharedRetainedTxs = foldl' (\r k -> retainedInsertMax k retainUntil r) @@ -3174,7 +3254,7 @@ seedRetainedTxids entries st0 = retainedKeys } where - retainUntil = addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now + retainUntil = addTime (bufferedTxsMinLifetime policy) now (_, stInterned) = internTxIds (fmap fst entries) st0 retainedKeys = [ unTxKey (lookupKeyOrFail txid stInterned) | (txid, _) <- entries From 67d3e1d69851ff4b8af9c2edbd543ea54258d8a8 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Tue, 28 Apr 2026 10:03:34 +0200 Subject: [PATCH 43/67] fixup: fix peer-tx-loops When peers have conflicting advertisement orders for TXs it is possible for them to form a dependancy loop. For example peer A has tx id 1 but requires tx id 0 to submit, peer B has txid 2 but requires tx id 1 to submit, peer C has txid 0 but requires txid 2 before it can submit. We break this loop by introducing a inflightTimeout. When a peer has spent that much time unable to get the TX into the mempool it will bump the new currentMaxInflightMultiplicity limit. This will allow another peer to issue a new request for the TX. 
--- .../Network/TxSubmission/Inbound/V2/Policy.hs | 10 +- .../Network/TxSubmission/Inbound/V2/State.hs | 133 +++++++++++++++--- .../Network/TxSubmission/Inbound/V2/Types.hs | 10 +- .../Ouroboros/Network/TxSubmission/AppV2.hs | 10 +- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 94 ++++++++++--- 5 files changed, 210 insertions(+), 47 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs index ac262ac97fb..60de4951bcd 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs @@ -61,8 +61,13 @@ data TxDecisionPolicy = TxDecisionPolicy { scoreMax :: !Double, -- ^ Maximum number of "rejections". Unit: seconds - interTxSpace :: !DiffTime + interTxSpace :: !DiffTime, -- ^ space between actual requests for the same TX. + + inflightTimeout :: !DiffTime + -- ^ Maximum time a peer's attempt may sit between claim and the + -- TxSubmitting state before the per-entry inflight-multiplicity + -- cap is bumped, allowing another peer to attempt in parallel. 
} deriving (Eq, Show) @@ -80,5 +85,6 @@ defaultTxDecisionPolicy = bufferedTxsMinLifetime = 2, scoreRate = 0.1, scoreMax = 15 * 60, - interTxSpace = 0.250 + interTxSpace = 0.250, + inflightTimeout = 1.0 -- = 4 * interTxSpace } diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index bc3085db0b4..2ace773c1bc 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -135,7 +135,8 @@ nextPeerActionWithMode :: (Ord peeraddr, HasRawTxId txid) nextPeerActionWithMode txIdRequestMode now policy peeraddr peerState sharedState = applyPeerActionChoice ctx (pickPeerActionChoice txIdRequestMode ctx) where - ctx = mkPeerActionContext now policy peeraddr peerState sharedState + sharedState' = bumpStuckEntries now policy peeraddr peerState sharedState + ctx = mkPeerActionContext now policy peeraddr peerState sharedState' -- | Pick which action to perform next. -- @@ -437,18 +438,30 @@ pickRequestTxIdsAction txIdRequestMode ctx@PeerActionContext { pacPolicy, pacPee -- | Compute the time delay until the peer should next wake to check for work. nextWakeDelay :: PeerActionContext peeraddr txid tx -> Maybe DiffTime -nextWakeDelay PeerActionContext { pacNow, pacClaimDelay, pacSharedPeerState, pacSharedState } = - (`diffTime` pacNow) <$> minMaybe nextLeaseWake nextRetainWake +nextWakeDelay PeerActionContext { pacNow, pacPolicy, pacClaimDelay + , pacSharedPeerState, pacPeerState, pacSharedState } = + (`diffTime` pacNow) <$> minMaybe (minMaybe nextClaimWake nextBumpWake) nextRetainWake where - nextLeaseWake = - IntSet.foldl' stepLease Nothing (sharedPeerAdvertisedTxKeys pacSharedPeerState) + -- Wake at the earliest claim-ready time among txs this peer advertises. 
+ nextClaimWake = + IntSet.foldl' stepClaim Nothing (sharedPeerAdvertisedTxKeys pacSharedPeerState) + + -- Wake at the earliest bump-ready time among txs this peer holds buffered. + -- Scoped to 'peerDownloadedTxs' to match 'bumpStuckEntries': only the + -- leaseholder schedules a bump-wake, so non-leaseholders don't busy-loop + -- at exact bumpAt with no entry to bump. + nextBumpWake = + IntMap.foldlWithKey' stepBump Nothing (peerDownloadedTxs pacPeerState) + + stepClaim acc k = + case IntMap.lookup k (sharedTxTable pacSharedState) of + Just txEntry -> minMaybe acc (futureClaimWake txEntry) + Nothing -> acc - stepLease acc k = + stepBump acc k _tx = case IntMap.lookup k (sharedTxTable pacSharedState) of - Just txEntry -> - minMaybe acc (futureClaimWake txEntry) - Nothing -> - acc + Just txEntry -> minMaybe acc (nextStuckBumpWake pacNow pacPolicy txEntry) + Nothing -> acc nextRetainWake = retainedNextWake pacNow (sharedRetainedTxs pacSharedState) @@ -473,6 +486,92 @@ claimTx peeraddr leaseUntil txEntry@TxEntry { txAttempts } = txAttempts = Map.insert peeraddr TxDownloading txAttempts } +-- | Time at which a leased entry becomes eligible for an inflight cap bump. +-- +-- The leaseholder's claim time is recovered from the lease deadline. If the +-- entry has been re-claimed since the original peer's attempt, this reflects +-- the latest claim instead. That is intentional: each successful claim earns +-- its own 'inflightTimeout' grace period before the next bump, so cap growth +-- is rate-limited at one bump per 'inflightTimeout' per claim. +stuckBumpReadyAt :: TxDecisionPolicy -> Time -> Time +stuckBumpReadyAt policy leaseUntil = + addTime (inflightTimeout policy - interTxSpace policy) leaseUntil + +-- | Bump 'currentMaxInflightMultiplicity' by one when the leaseholder has +-- held the lease past 'inflightTimeout' without anyone reaching 'TxSubmitting', +-- and the cap is the bottleneck preventing another peer from joining. 
+bumpCurrentMaxIfStuck :: Time -> TxDecisionPolicy -> TxEntry peeraddr -> TxEntry peeraddr +bumpCurrentMaxIfStuck now policy + entry@TxEntry { txLease = TxLeased _ leaseUntil + , currentMaxInflightMultiplicity = cap } + | txActiveAttemptCount entry >= cap + , now >= stuckBumpReadyAt policy leaseUntil + , not (txSubmittingAnywhere entry) + = entry { currentMaxInflightMultiplicity = cap + 1 } +bumpCurrentMaxIfStuck _ _ entry = entry + +-- | Future wake time at which an entry would become eligible for a cap bump. +-- +-- Returns 'Just' for any bump-eligible entry whose 'bumpAt' is at or after +-- 'now'. Using '>=' (rather than '>') means a peer that runs 'nextWakeDelay' +-- without 'bumpStuckEntries' having already fired still schedules a wake. +nextStuckBumpWake :: Time -> TxDecisionPolicy -> TxEntry peeraddr -> Maybe Time +nextStuckBumpWake now policy + entry@TxEntry { txLease = TxLeased _ leaseUntil + , currentMaxInflightMultiplicity = cap } + | txActiveAttemptCount entry >= cap + , not (txSubmittingAnywhere entry) + , let bumpAt = stuckBumpReadyAt policy leaseUntil + , bumpAt >= now + = Just bumpAt +nextStuckBumpWake _ _ _ = Nothing + +-- | Sweep the txs this peer has buffered locally and bump any whose lease +-- has been held past 'inflightTimeout'. The leaseholder is in the best +-- position to detect that it is holding others up: its 'peerDownloadedTxs' +-- is small (usually empty) so the sweep is cheap, and any tx it has buffered +-- is one it has at some point claimed itself. +-- +-- When an entry is bumped, this also bumps the per-peer generation of every +-- other peer that advertises it so they wake out of 'awaitSharedChange' and +-- re-evaluate eligibility under the new cap. 'sharedGeneration' is bumped +-- so 'writeSharedStateIfChanged' commits the update. 
+bumpStuckEntries :: Ord peeraddr + => Time + -> TxDecisionPolicy + -> peeraddr + -> PeerTxLocalState tx + -> SharedTxState peeraddr txid + -> SharedTxState peeraddr txid +bumpStuckEntries now policy peeraddr peerState st = + if IntSet.null bumpedKeys + then st + else bumpPeerGenerations + (advertisingPeersForTxKeysExcept peeraddr bumpedKeys st) + st { sharedTxTable = txTable', + sharedGeneration = sharedGeneration st + 1 } + where + (bumpedKeys, txTable') = + IntMap.foldlWithKey' bumpOne (IntSet.empty, sharedTxTable st) + (peerDownloadedTxs peerState) + bumpOne (bumpedAcc, tbl) k _tx = + case IntMap.lookup k tbl of + Nothing -> (bumpedAcc, tbl) + Just entry -> + let entry' = bumpCurrentMaxIfStuck now policy entry in + if currentMaxInflightMultiplicity entry' + /= currentMaxInflightMultiplicity entry + then (IntSet.insert k bumpedAcc, IntMap.insert k entry' tbl) + else (bumpedAcc, tbl) + +-- | Number of peers currently attempting this tx body. +-- +-- Counts every peer in 'txAttempts' regardless of attempt state. Callers +-- that have already excluded 'TxSubmitting' via 'txSubmittingAnywhere' +-- effectively count only 'TxDownloading' and 'TxBuffered'. +txActiveAttemptCount :: TxEntry peeraddr -> Int +txActiveAttemptCount TxEntry { txAttempts } = Map.size txAttempts + -- | Determine if a tx is eligible for this peer to request. 
-- -- A tx is selectable if it can be claimed or is already owned by this peer @@ -482,13 +581,13 @@ txSelectable :: Ord peeraddr -> TxKey -> TxEntry peeraddr -> Bool -txSelectable PeerActionContext { pacNow, pacPeerAddr, pacPolicy, pacSharedPeerState +txSelectable PeerActionContext { pacNow, pacPeerAddr, pacSharedPeerState , pacClaimDelay } txKey txEntry | txSubmittingAnywhere txEntry = False | txPeerHasAttempt = False - | txActiveAttemptCount txEntry >= txInflightMultiplicity pacPolicy = False + | txActiveAttemptCount txEntry >= currentMaxInflightMultiplicity txEntry = False | not peerAdvertisesTx = False | txOwnedByPeer txEntry = True | otherwise = txClaimReadyAt pacClaimDelay txEntry <= pacNow @@ -502,12 +601,6 @@ txSelectable PeerActionContext { pacNow, pacPeerAddr, pacPolicy, pacSharedPeerSt txPeerHasAttempt = Map.member pacPeerAddr (txAttempts txEntry) - -- Safe to use Map.size here: by the time this guard is reached, - -- txSubmittingAnywhere has already returned False, so the map contains - -- only TxDownloading and TxBuffered entries. - txActiveAttemptCount :: TxEntry peeraddr -> Int - txActiveAttemptCount TxEntry { txAttempts } = Map.size txAttempts - -- | Extract the peer's TxAttemptState for the TX entry, if it exists. 
txAttemptOfPeer :: Ord peeraddr => peeraddr -> TxEntry peeraddr -> Maybe TxAttemptState @@ -1371,7 +1464,9 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize let txEntry = TxEntry { txLease = TxClaimable now, txAdvertiserCount = 1, - txAttempts = Map.empty + txAttempts = Map.empty, + currentMaxInflightMultiplicity = + txInflightMultiplicity policy } in ( txKey : unacknowledgedAcc , IntMap.insert k txSize availableAcc diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index d5936f2caa3..ac6cc62e5e5 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -185,7 +185,15 @@ data TxEntry peeraddr = TxEntry { txAdvertiserCount :: !Int, -- | Current per-peer attempt state for this tx body. - txAttempts :: !(Map peeraddr TxAttemptState) + txAttempts :: !(Map peeraddr TxAttemptState), + + -- | Effective per-tx inflight multiplicity cap. + -- + -- Initialised from 'txInflightMultiplicity' of the policy when the + -- entry is created, and bumped by one when a peer's attempt sits past + -- 'inflightTimeout' without reaching 'TxSubmitting', allowing another + -- peer to attempt in parallel. 
+ currentMaxInflightMultiplicity :: !Int } deriving stock (Eq, Show, Generic) deriving anyclass (NFData, NoThunks) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs index 81296c2419b..ecfd1885a67 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs @@ -427,16 +427,22 @@ prop_txSubmission_inflight st@(TxSubmissionState state policy) = then merge (mapMissing \_txid _left -> error "impossible") (mapMissing \_txid _right -> True) (zipWithMatched \_txid left right -> - left <= right `min` txInflightMultiplicity policy) + left <= right `min` inflightLimit) resultRepeatedValidTxs maxRepeatedValidTxs else merge (mapMissing \_txid _left -> error "impossible") (mapMissing \_txid _right -> False) (zipWithMatched \_txid left right -> - left <= right `min` txInflightMultiplicity policy) + left <= right `min` inflightLimit) resultRepeatedValidTxs maxRepeatedValidTxs where + -- Loosened from txInflightMultiplicity to account for the per-tx + -- 'currentMaxInflightMultiplicity' bumps that fire when a peer holds + -- a lease past 'inflightTimeout'. Each peer can contribute at most one + -- bump per stuck claim, so 'cap + peers' is a safe static upper bound. + inflightLimit = txInflightMultiplicity policy + Map.size state + -- we work with txid's because a repeated tx may have different advertised/actual -- byte size by different peers in this test, but otherwise multiplicity -- should be determined by txid. 
diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index f6417fc7c02..1173d692d3e 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -451,17 +451,22 @@ instance Arbitrary ArbTxDecisionPolicy where arbitrary = frequency [ (1, pure (ArbTxDecisionPolicy defaultTxDecisionPolicy)) - , (9, ArbTxDecisionPolicy <$> ( - TxDecisionPolicy . getSmall . getPositive - <$> arbitrary - <*> (getSmall . getPositive <$> arbitrary) - <*> (SizeInBytes . getPositive <$> arbitrary) - <*> choose (1, 10) - <*> (getSmall . getPositive <$> arbitrary) - <*> (realToFrac <$> choose (0 :: Double, 2)) - <*> choose (0, 1) - <*> choose (0, 1800) - <*> (realToFrac <$> choose (0 :: Double, 1)))) + , (9, do + interTxSpaceVal <- realToFrac <$> choose (0 :: Double, 1) + offset <- choose (0.01 :: Double, 10) + let inflightTimeoutVal = interTxSpaceVal + realToFrac offset + ArbTxDecisionPolicy <$> ( + TxDecisionPolicy . getSmall . getPositive + <$> arbitrary + <*> (getSmall . getPositive <$> arbitrary) + <*> (SizeInBytes . getPositive <$> arbitrary) + <*> choose (1, 10) + <*> (getSmall . 
getPositive <$> arbitrary) + <*> (realToFrac <$> choose (0 :: Double, 2)) + <*> choose (0, 1) + <*> choose (0, 1800) + <*> pure interTxSpaceVal + <*> pure inflightTimeoutVal)) ] shrink (ArbTxDecisionPolicy a) @@ -495,6 +500,10 @@ instance Arbitrary ArbTxDecisionPolicy where ++ [ ArbTxDecisionPolicy a { interTxSpace = realToFrac x } | NonNegative x <- shrink (NonNegative (realToFrac (interTxSpace a) :: Double)) ] + ++ [ ArbTxDecisionPolicy a { inflightTimeout = realToFrac x } + | NonNegative x <- shrink (NonNegative (realToFrac (inflightTimeout a) :: Double)) + , realToFrac x > interTxSpace a + ] instance Arbitrary ArbSharedPeerState where arbitrary = ArbSharedPeerState <$> genSharedPeerState @@ -1427,8 +1436,8 @@ prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected (ArbTxDecisionPolicy pol ensurePeerAdvertisesTxKeys peeraddr [keyA', keyB'] $ st { sharedTxTable = IntMap.fromList - [ (unTxKey keyA', mkTxEntry peeraddr txSizeA (Just TxBuffered)) - , (unTxKey keyB', mkTxEntry peeraddr txSizeB (Just TxBuffered)) + [ (unTxKey keyA', mkTxEntry peeraddr txSizeA (Just TxBuffered) policy) + , (unTxKey keyB', mkTxEntry peeraddr txSizeB (Just TxBuffered) policy) ] } keyA = lookupKeyOrFail txidA sharedState0 @@ -1474,6 +1483,7 @@ prop_nextPeerAction_prioritisesSubmit (ArbTxDecisionPolicy policy) (Positive pee { txLease = TxLeased peeraddr (addTime 10 now) , txAdvertiserCount = 1 , txAttempts = Map.singleton peeraddr TxBuffered + , currentMaxInflightMultiplicity = txInflightMultiplicity policy } , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid @@ -1533,6 +1543,7 @@ prop_nextPeerAction_claimsClaimableTx (ArbTxDecisionPolicy policy) (Positive pee { txLease = TxClaimable (Time 0) , txAdvertiserCount = 3 , txAttempts = Map.empty + , currentMaxInflightMultiplicity = txInflightMultiplicity policy } } peerState0 = emptyPeerTxLocalState @@ -1572,6 +1583,8 @@ unit_nextPeerAction_claimsAtScoreDelayThreshold step = do { txLease = 
TxClaimable claimableAt , txAdvertiserCount = 1 , txAttempts = Map.empty + , currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy } } peerState0 = emptyPeerTxLocalState @@ -1630,6 +1643,8 @@ prop_nextPeerAction_claimsRejectedTxFromOtherAdvertiser = { txLease = TxLeased peerA (addTime 1 now) , txAdvertiserCount = 2 , txAttempts = Map.singleton peerA TxBuffered + , currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy } } @@ -1703,6 +1718,7 @@ prop_nextPeerAction_claimsExpiredLease (ArbTxDecisionPolicy policy) (Positive ol { txLease = TxLeased oldOwner (Time 0) , txAdvertiserCount = 3 , txAttempts = Map.empty + , currentMaxInflightMultiplicity = txInflightMultiplicity policy } } peerState0 = emptyPeerTxLocalState @@ -1749,7 +1765,7 @@ prop_nextPeerAction_requestsOversizedFirstTx (ArbTxDecisionPolicy basePolicy) (P , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 - , sharedTxTable = IntMap.singleton k (mkTxEntry peeraddr txSize Nothing) + , sharedTxTable = IntMap.singleton k (mkTxEntry peeraddr txSize Nothing policy) } peerState0 = emptyPeerTxLocalState { peerUnacknowledgedTxIds = StrictSeq.singleton key @@ -1798,11 +1814,13 @@ unit_nextPeerAction_skipsBlockedAvailableTxs step = do { txLease = TxLeased otherPeer (addTime 10 testNow) , txAdvertiserCount = 2 , txAttempts = Map.empty + , currentMaxInflightMultiplicity = txInflightMultiplicity policy }) , (kClaimable, TxEntry { txLease = TxClaimable (Time 0) , txAdvertiserCount = 1 , txAttempts = Map.empty + , currentMaxInflightMultiplicity = txInflightMultiplicity policy }) ] } @@ -1842,6 +1860,7 @@ prop_nextPeerAction_ownerSubmitsBuffered (ArbTxDecisionPolicy policy) (Positive { txLease = TxClaimable (Time 0) , txAdvertiserCount = 1 , txAttempts = Map.singleton peeraddr TxBuffered + , currentMaxInflightMultiplicity = txInflightMultiplicity policy } , sharedTxIdToKey = Map.singleton 
(getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid @@ -1890,6 +1909,8 @@ unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx step = do [ (peeraddr, TxBuffered) , (submittingPeer, TxSubmitting) ] + , currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy } sharedState0 = emptySharedTxState { sharedPeers = Map.fromList @@ -1904,6 +1925,8 @@ unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx step = do { txLease = TxClaimable (Time 0) , txAdvertiserCount = 1 , txAttempts = Map.empty + , currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy }) ] , sharedTxIdToKey = Map.fromList @@ -1961,6 +1984,8 @@ unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx step = do [ (peeraddr, TxBuffered) , (submittingPeer, TxSubmitting) ] + , currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy } sharedState0 = emptySharedTxState { sharedPeers = Map.fromList @@ -2034,6 +2059,7 @@ prop_nextPeerAction_nonOwnerWaitsUntilResolved (ArbTxDecisionPolicy policy) (Pos { txLease = TxClaimable (Time 0) , txAdvertiserCount = 2 , txAttempts = Map.singleton owner TxBuffered + , currentMaxInflightMultiplicity = txInflightMultiplicity policy } , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid @@ -2234,11 +2260,12 @@ prop_nextPeerActionPipelined_secondBodyBatch (ArbTxDecisionPolicy basePolicy) (P Map.singleton peeraddr (withAdvertisedTxKeys [keyA, keyB] (mkSharedPeerState PeerIdle)) , sharedTxTable = IntMap.fromList - [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading)) + [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading) policy) , (kB, TxEntry { txLease = TxClaimable (Time 0) , txAdvertiserCount = 1 , txAttempts = Map.empty + , currentMaxInflightMultiplicity = txInflightMultiplicity policy }) ] , sharedTxIdToKey = Map.fromList [(getRawTxId txidA, keyA), (getRawTxId txidB, keyB)] @@ -2305,12 +2332,13 @@ 
prop_nextPeerActionPipelined_noThirdBodyBatch (ArbTxDecisionPolicy basePolicy) ( Map.singleton peeraddr (withAdvertisedTxKeys [keyA, keyB, keyC] (mkSharedPeerState PeerIdle)) , sharedTxTable = IntMap.fromList - [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading)) - , (kB, mkTxEntry peeraddr txSizeB (Just TxDownloading)) + [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading) policy) + , (kB, mkTxEntry peeraddr txSizeB (Just TxDownloading) policy) , (kC, TxEntry { txLease = TxClaimable (Time 0) , txAdvertiserCount = 1 , txAttempts = Map.empty + , currentMaxInflightMultiplicity = txInflightMultiplicity policy }) ] , sharedTxIdToKey = Map.fromList [(getRawTxId txidA, keyA), (getRawTxId txidB, keyB), (getRawTxId txidC, keyC)] @@ -2437,6 +2465,7 @@ prop_nextPeerAction_earliestWakeDelay (ArbTxDecisionPolicy policy) (Positive pee { txLease = TxLeased owner leaseUntil , txAdvertiserCount = 2 , txAttempts = Map.empty + , currentMaxInflightMultiplicity = txInflightMultiplicity policy } , sharedRetainedTxs = retainedSingleton (unTxKey keyB) retainUntilLater , sharedTxIdToKey = Map.fromList [(getRawTxId txidA, keyA), (getRawTxId txidB, keyB)] @@ -2449,6 +2478,7 @@ prop_nextPeerAction_earliestWakeDelay (ArbTxDecisionPolicy policy) (Positive pee { txLease = TxLeased owner leaseUntilLater , txAdvertiserCount = 2 , txAttempts = Map.empty + , currentMaxInflightMultiplicity = txInflightMultiplicity policy } , sharedRetainedTxs = retainedSingleton (unTxKey keyB) retainUntilSoon , sharedTxIdToKey = Map.fromList [(getRawTxId txidA, keyA), (getRawTxId txidB, keyB)] @@ -2525,6 +2555,7 @@ prop_handleSubmittedTxs_bumpsAdvertisers (ArbTxDecisionPolicy policy) (Positive { txLease = TxLeased owner (addTime 10 now) , txAdvertiserCount = 3 , txAttempts = Map.singleton owner TxBuffered + , currentMaxInflightMultiplicity = txInflightMultiplicity policy } , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid @@ -2562,6 +2593,8 @@ 
unit_advertisingPeersForTxExcept_scansAuthoritativePeerSets step = do { txLease = TxLeased owner (addTime 10 now) , txAdvertiserCount = 1 , txAttempts = Map.empty + , currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy } } @@ -2600,6 +2633,8 @@ unit_removeAdvertisingPeersForResolvedTx_clearsAllAdvertisingPeers step = do { txLease = TxLeased owner (addTime 10 now) , txAdvertiserCount = 1 , txAttempts = Map.empty + , currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy } } @@ -2647,6 +2682,8 @@ unit_updatePeerPhase_wakesCompetingAdvertisers step = do { txLease = TxClaimable (Time 0) , txAdvertiserCount = 2 , txAttempts = Map.empty + , currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy } } sharedState' = updatePeerPhase leavingPeer PeerWaitingTxs sharedState0 @@ -2795,7 +2832,9 @@ genLeasedTxEntry peeraddrs _txid = do , TxEntry { txLease, txAdvertiserCount = Set.size txAdvertisers, - txAttempts = maybe Map.empty (Map.singleton owner) ownerAttempt + txAttempts = maybe Map.empty (Map.singleton owner) ownerAttempt, + currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy } ) @@ -2810,7 +2849,9 @@ genClaimableTxEntry peeraddrs _txid = do , TxEntry { txLease = TxClaimable claimableAt, txAdvertiserCount = Set.size txAdvertisers, - txAttempts = Map.empty + txAttempts = Map.empty, + currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy } ) @@ -3201,11 +3242,12 @@ mkRequestedTxBatch keys requestedTxBatchSize = RequestedTxBatch } -- Construct a leased tx entry owned by one peer. 
-mkTxEntry :: PeerAddr -> SizeInBytes -> Maybe TxAttemptState -> TxEntry PeerAddr -mkTxEntry peeraddr _txSize mAttempt = TxEntry +mkTxEntry :: PeerAddr -> SizeInBytes -> Maybe TxAttemptState -> TxDecisionPolicy -> TxEntry PeerAddr +mkTxEntry peeraddr _txSize mAttempt policy = TxEntry { txLease = TxLeased peeraddr (addTime 10 now) , txAdvertiserCount = 1 , txAttempts = maybe Map.empty (Map.singleton peeraddr) mAttempt + , currentMaxInflightMultiplicity = txInflightMultiplicity policy } -- Look up an interned key and fail fast in test setup code. @@ -3280,7 +3322,9 @@ seedActiveTxidsForOtherPeer otherPeer entries st0 = activeEntry = TxEntry { txLease = TxClaimable now, txAdvertiserCount = 1, - txAttempts = Map.empty + txAttempts = Map.empty, + currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy } (_, stInterned) = internTxIds (fmap fst entries) st0 activeKeys = [ unTxKey (lookupKeyOrFail txid stInterned) @@ -3323,7 +3367,9 @@ seedRequestedActiveTxids peeraddr otherPeerOpt tagged st0 = mkEntry coAdv = TxEntry { txLease = TxLeased peeraddr leaseUntil, txAdvertiserCount = if coAdv then 2 else 1, - txAttempts = Map.singleton peeraddr TxDownloading + txAttempts = Map.singleton peeraddr TxDownloading, + currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy } stWithTable = stInterned { @@ -3562,6 +3608,8 @@ mkActiveSharedState allPeers ownerPeer resolvedAdvertisers txidsAndSizes = { txLease = TxLeased ownerPeer (addTime 10 now) , txAdvertiserCount = Set.size advertisers , txAttempts = Map.empty + , currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy } -- Resolve all active txs into retained entries so non-owner peers may safely From 4298fff4134cf69e96b7e038dba6a0786573b95c Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Tue, 28 Apr 2026 14:24:44 +0200 Subject: [PATCH 44/67] AppV2: Impairment test Add tests that verify that V2 is robust even if peers are late or omit TXs in replies. 
--- ouroboros-network/ouroboros-network.cabal | 1 + .../Ouroboros/Network/TxSubmission/AppV2.hs | 176 ++++++++++++++++-- .../Network/TxSubmission/Impaired.hs | 103 ++++++++++ 3 files changed, 260 insertions(+), 20 deletions(-) create mode 100644 ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Impaired.hs diff --git a/ouroboros-network/ouroboros-network.cabal b/ouroboros-network/ouroboros-network.cabal index 472f531752d..4dd8c5d2246 100644 --- a/ouroboros-network/ouroboros-network.cabal +++ b/ouroboros-network/ouroboros-network.cabal @@ -970,6 +970,7 @@ library ouroboros-network-tests-lib Test.Ouroboros.Network.TxSubmission Test.Ouroboros.Network.TxSubmission.AppV1 Test.Ouroboros.Network.TxSubmission.AppV2 + Test.Ouroboros.Network.TxSubmission.Impaired Test.Ouroboros.Network.TxSubmission.Mempool.Simple Test.Ouroboros.Network.TxSubmission.MempoolWriter Test.Ouroboros.Network.TxSubmission.TxLogic diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs index ecfd1885a67..4bbe1b517d8 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs @@ -59,6 +59,9 @@ import Ouroboros.Network.TxSubmission.Inbound.V2.Types import Ouroboros.Network.TxSubmission.Outbound import Ouroboros.Network.Util.ShowProxy +import System.Random (mkStdGen) + +import Test.Ouroboros.Network.TxSubmission.Impaired (delayBodies, omitBodies) import Test.Ouroboros.Network.TxSubmission.TxLogic hiding (tests) import Test.Ouroboros.Network.TxSubmission.Types import Test.Ouroboros.Network.Utils hiding (debugTracer) @@ -73,8 +76,9 @@ import Test.Tasty.QuickCheck (testProperty) tests :: TestTree tests = testGroup "AppV2" - [ testProperty "txSubmission" prop_txSubmission - , testProperty "inflight" prop_txSubmission_inflight + [ testProperty "txSubmission" prop_txSubmission + , 
testProperty "inflight" prop_txSubmission_inflight + , testProperty "resilientToImpairment" prop_txSubmission_resilientToImpairment , testProperty "SharedTxState" $ withMaxSize 25 $ withMaxSuccess 25 prop_sharedTxStateInvariant @@ -92,9 +96,43 @@ data TxSubmissionState = -- delay is less than 10s, otherwise 'smallDelay' in -- 'timeLimitsTxSubmission2' will kick in. ) + , peerImpairment :: Map Int Impairment , decisionPolicy :: TxDecisionPolicy } deriving (Show) +-- | Behavioural fault injection on a peer's outbound 'TxSubmissionClient'. +-- Peers absent from 'peerImpairment' run with no impairment. +data Impairment = Impairment + { impairBodyDelay :: Maybe DiffTime + -- ^ added before each MsgReplyTxs; txid replies are unaffected + , impairOmitProb :: Double + -- ^ per-body Bernoulli drop probability, in [0, 1] + , impairSeed :: Int + -- ^ seed for the per-peer StdGen used by 'omitBodies' + } deriving Show + +noImpairment :: Impairment +noImpairment = Impairment { impairBodyDelay = Nothing + , impairOmitProb = 0 + , impairSeed = 0 + } + +-- | Wrap a 'TxSubmissionClient' with the given 'Impairment'. Allocates a +-- per-peer 'StdGen' TVar only when the omission rate is non-zero. 
+applyImpairment :: (MonadDelay m, MonadSTM m) + => Impairment + -> TxSubmissionClient txid tx m a + -> m (TxSubmissionClient txid tx m a) +applyImpairment Impairment { impairBodyDelay, impairOmitProb, impairSeed } c0 = do + c1 <- if impairOmitProb > 0 + then do + genVar <- newTVarIO (mkStdGen impairSeed) + pure (omitBodies genVar impairOmitProb c0) + else pure c0 + pure $ case impairBodyDelay of + Just d -> delayBodies d c1 + Nothing -> c1 + instance Arbitrary TxSubmissionState where arbitrary = do ArbTxDecisionPolicy decisionPolicy <- arbitrary @@ -108,13 +146,14 @@ instance Arbitrary TxSubmissionState where peersState <- zipWith (curry (\(a, (b, c)) -> (a, b, c))) txs <$> vectorOf peersN arbitrary return TxSubmissionState { peerMap = Map.fromList (zip peers peersState), + peerImpairment = Map.empty, decisionPolicy } - shrink TxSubmissionState { peerMap, decisionPolicy } = - TxSubmissionState <$> shrinkMap1 peerMap - <*> [ policy - | ArbTxDecisionPolicy policy <- shrink (ArbTxDecisionPolicy decisionPolicy) - ] + shrink TxSubmissionState { peerMap, peerImpairment, decisionPolicy } = + [ TxSubmissionState peerMap' peerImpairment policy + | peerMap' <- shrinkMap1 peerMap + , ArbTxDecisionPolicy policy <- shrink (ArbTxDecisionPolicy decisionPolicy) + ] where shrinkMap1 :: (Ord k, Arbitrary k, Arbitrary v) => Map k v -> [Map k v] shrinkMap1 m @@ -163,10 +202,11 @@ runTxSubmission , Maybe DiffTime , Maybe DiffTime ) + -> Map peeraddr Impairment -> TxDecisionPolicy -> m ([Tx txid], [[Tx txid]]) -- ^ inbound and outbound mempools -runTxSubmission tracer _tracerTxLogic st0 txDecisionPolicy = do +runTxSubmission tracer _tracerTxLogic st0 peerImpairmentMap txDecisionPolicy = do st <- traverse (\(b, c, d, e) -> do mempool <- newMempool b (outChannel, inChannel) <- createConnectedChannels @@ -184,13 +224,15 @@ runTxSubmission tracer _tracerTxLogic st0 txDecisionPolicy = do labelTVarIO sharedTxStateVar "shared-tx-state" let clients = (\(addr, (mempool {- txs -}, ctrlMsgSTM, 
outDelay, _, outChannel, _)) -> do - let client = txSubmissionOutbound - (Tracer $ say . show) - (NumTxIdsToAck $ getNumTxIdsToReq - $ maxUnacknowledgedTxIds txDecisionPolicy) - (getMempoolReader mempool) - (maxBound :: TestVersion) - ctrlMsgSTM + let baseClient = txSubmissionOutbound + (Tracer $ say . show) + (NumTxIdsToAck $ getNumTxIdsToReq + $ maxUnacknowledgedTxIds txDecisionPolicy) + (getMempoolReader mempool) + (maxBound :: TestVersion) + ctrlMsgSTM + imp = Map.findWithDefault noImpairment addr peerImpairmentMap + client <- applyImpairment imp baseClient runPeerWithLimits (("OUTBOUND " ++ show addr,) `contramap` tracer) txSubmissionCodec2 (byteLimitsTxSubmission2 (fromIntegral . BSL.length)) @@ -255,7 +297,7 @@ runTxSubmission tracer _tracerTxLogic st0 txDecisionPolicy = do txSubmissionSimulation :: forall s . TxSubmissionState -> IOSim s ([Tx Int], [[Tx Int]]) -- ^ inbound & outbound mempools -txSubmissionSimulation (TxSubmissionState state txDecisionPolicy) = do +txSubmissionSimulation (TxSubmissionState state peerImpairment txDecisionPolicy) = do state' <- traverse (\(txs, mbOutDelay, mbInDelay) -> do let mbOutDelayTime = getSmallDelay . getPositive <$> mbOutDelay mbInDelayTime = getSmallDelay . getPositive <$> mbInDelay @@ -293,7 +335,7 @@ txSubmissionSimulation (TxSubmissionState state txDecisionPolicy) = do ) \_ -> do let tracer :: forall a. (Show a, Typeable a) => Tracer (IOSim s) a tracer = dynamicTracer <> sayTracer -- <> verboseTracer <> debugTracer - runTxSubmission tracer tracer state'' txDecisionPolicy + runTxSubmission tracer tracer state'' peerImpairment txDecisionPolicy filterValidTxs :: [Tx txid] -> [Tx txid] filterValidTxs @@ -305,7 +347,7 @@ filterValidTxs -- didn't regress. 
-- prop_txSubmission :: TxSubmissionState -> Property -prop_txSubmission st@(TxSubmissionState peers _) = +prop_txSubmission st@(TxSubmissionState peers _ _) = let tr = runSimTrace (txSubmissionSimulation st) numPeersWithWronglySizedTx :: Int numPeersWithWronglySizedTx = @@ -398,7 +440,7 @@ prop_txSubmission st@(TxSubmissionState peers _) = -- TODO: have we generated enough outbound mempools which interact in interesting -- ways? prop_txSubmission_inflight :: TxSubmissionState -> Property -prop_txSubmission_inflight st@(TxSubmissionState state policy) = +prop_txSubmission_inflight st@(TxSubmissionState state _ policy) = let maxRepeatedValidTxs = Map.foldr (\(txs, _, _) r -> foldr fn r txs) Map.empty state @@ -456,8 +498,102 @@ prop_txSubmission_inflight st@(TxSubmissionState state policy) = = r' +-- | Resilience to per-peer impairment. With a non-empty subset of peers +-- wrapped in any combination of 'omitBodies' and 'delayBodies', every tx +-- contributed by a well-behaved peer must still reach the inbound mempool. +-- Exercises V2's cross-peer retry path (omission) and stuck-leaseholder +-- bump path (delay). +-- +-- Contributions from an impaired peer are not asserted: omitted bodies +-- may never reach the mempool, and severely delayed bodies may not arrive +-- before the simulation terminates. +prop_txSubmission_resilientToImpairment :: TxSubmissionState -> Property +prop_txSubmission_resilientToImpairment baseSt = + forAll (genImpairment (Map.keys (peerMap baseSt))) $ \imp -> + not (Map.null imp) ==> + let st = baseSt { peerImpairment = imp } + allAddrs = Map.keysSet (peerMap st) + wbAddrs = allAddrs `Set.difference` Map.keysSet imp + wbPeerTxs = [ txs + | addr <- Set.toList wbAddrs + , let (txs, _, _) = peerMap st Map.! 
addr ] + tr = runSimTrace (txSubmissionSimulation st) + in not (Set.null wbAddrs) ==> + label ("impaired peers: " ++ show (Map.size imp)) $ + label ("well-behaved peers: " ++ show (Set.size wbAddrs)) $ + tabulate "impairment kind" (kindOf <$> Map.elems imp) $ + case traceResult True tr of + Left e -> + counterexample (show e) + . counterexample (ppTrace tr) + $ False + Right (inmp, _) -> + counterexample (ppTrace tr) + $ conjoin (validateWellBehaved inmp `map` wbPeerTxs) + where + -- Pick a non-empty proper subset of peers to impair. Each impaired + -- peer gets some mix of body delay and per-body omission (at least + -- one of the two). + genImpairment :: [Int] -> Gen (Map Int Impairment) + genImpairment addrs + | length addrs < 2 = pure Map.empty + | otherwise = do + n <- choose (1, length addrs - 1) + shuffled <- shuffle addrs + let impaired = take n shuffled + imps <- traverse (const genOneImpairment) impaired + pure (Map.fromList (zip impaired imps)) + + genOneImpairment :: Gen Impairment + genOneImpairment = oneof [genOmit, genDelay, genBoth] + + genOmit = do + p <- choose (0.1 :: Double, 0.9) + seed <- arbitrary + pure Impairment { impairBodyDelay = Nothing + , impairOmitProb = p + , impairSeed = seed + } + genDelay = do + d <- choose (0.1 :: Double, 2.0) + pure Impairment { impairBodyDelay = Just (realToFrac d) + , impairOmitProb = 0 + , impairSeed = 0 + } + genBoth = do + p <- choose (0.1 :: Double, 0.9) + seed <- arbitrary + d <- choose (0.1 :: Double, 2.0) + pure Impairment { impairBodyDelay = Just (realToFrac d) + , impairOmitProb = p + , impairSeed = seed + } + + kindOf Impairment { impairBodyDelay = Nothing, impairOmitProb = _ } = "omit-only" + kindOf Impairment { impairBodyDelay = Just _, impairOmitProb = 0 } = "delay-only" + kindOf Impairment { impairBodyDelay = Just _, impairOmitProb = _ } = "delay+omit" + + -- Same shape as 'validate' inside 'prop_txSubmission'. 
Only assert + -- coverage when the peer's stream is all-unique-and-all-valid; the + -- duplicate / invalid-prefix cases are out of scope here and covered + -- by the existing 'prop_txSubmission'. + validateWellBehaved :: [Tx Int] -> [Tx Int] -> Property + validateWellBehaved inmp outmp = + let outUnique = nubBy ((==) `on` getTxId) outmp + outValid = filterValidTxs outmp in + if length outUnique == length outmp && length outValid == length outmp + then + let outIds = Set.fromList (getTxId <$> outValid) + inIds = Set.fromList (getTxId <$> inmp) + missing = outIds `Set.difference` inIds in + counterexample ("missing: " ++ show (Set.toList missing)) + $ property (Set.null missing) + else + property True + + prop_sharedTxStateInvariant :: TxSubmissionState -> Property -prop_sharedTxStateInvariant initialState@(TxSubmissionState st0 _) = +prop_sharedTxStateInvariant initialState@(TxSubmissionState st0 _ _) = let tr = runSimTrace (() <$ txSubmissionSimulation initialState) pTrace = List.intercalate "\n" $ map (\(Time t, ev) -> show t <> " " <> ev) $ selectTraceEventsSayWithTime' tr diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Impaired.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Impaired.hs new file mode 100644 index 00000000000..8a96533ce68 --- /dev/null +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Impaired.hs @@ -0,0 +1,103 @@ +{-# LANGUAGE GADTs #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE ScopedTypeVariables #-} + +-- | Test-only impairment shims for 'TxSubmissionClient'. They wrap an +-- existing outbound peer with behavioural faults at the typed-protocol +-- level: 'delayBodies' adds latency to body replies, 'omitBodies' drops +-- bodies probabilistically. Both pass txid replies through unchanged so +-- the impaired peer still advertises promptly. +-- +-- The wrappers are polymorphic in @txid@ and @tx@ and depend only on the +-- TxSubmission2 client types. 
+module Test.Ouroboros.Network.TxSubmission.Impaired + ( delayBodies + , omitBodies + ) where + +import Control.Concurrent.Class.MonadSTM.Strict (MonadSTM, StrictTVar, + atomically, stateTVar) +import Control.Monad (filterM) +import Control.Monad.Class.MonadTime.SI (DiffTime) +import Control.Monad.Class.MonadTimer.SI (MonadDelay, threadDelay) +import System.Random (StdGen, uniformR) + +import Ouroboros.Network.Protocol.TxSubmission2.Client + + +-- | Add a fixed delay before every 'MsgReplyTxs' (body reply); txid +-- replies pass through unchanged. Models a peer that advertises promptly +-- but is slow to deliver bodies. +-- +-- The wrapper is recursive: every 'ClientStIdle' continuation produced +-- by the inner peer is wrapped in turn, so the delay applies to every +-- body reply through the protocol session — not just the first. +delayBodies + :: forall txid tx m a. + MonadDelay m + => DiffTime + -> TxSubmissionClient txid tx m a + -> TxSubmissionClient txid tx m a +delayBodies d (TxSubmissionClient mIdle) = + TxSubmissionClient (wrapIdle <$> mIdle) + where + wrapIdle :: ClientStIdle txid tx m a -> ClientStIdle txid tx m a + wrapIdle ClientStIdle { recvMsgRequestTxIds, recvMsgRequestTxs } = ClientStIdle + { recvMsgRequestTxIds = \blocking ack req -> do + reply <- recvMsgRequestTxIds blocking ack req + pure (wrapTxIds reply) + , recvMsgRequestTxs = \txids -> do + reply <- recvMsgRequestTxs txids + threadDelay d + pure (wrapTxs reply) + } + + wrapTxIds :: ClientStTxIds blocking txid tx m a + -> ClientStTxIds blocking txid tx m a + wrapTxIds (SendMsgReplyTxIds reply k) = SendMsgReplyTxIds reply (wrapIdle k) + wrapTxIds (SendMsgDone a) = SendMsgDone a + + wrapTxs :: ClientStTxs txid tx m a -> ClientStTxs txid tx m a + wrapTxs (SendMsgReplyTxs txs k) = SendMsgReplyTxs txs (wrapIdle k) + + +-- | Drop each body in 'MsgReplyTxs' independently with the given +-- probability; txid replies pass through unchanged. 
Models a peer whose +-- mempool evicts entries between advertise and fetch — the receiver sees +-- a body list that is a subset of what it requested. +-- +-- Randomness is threaded through a 'StrictTVar' so the test can seed it +-- from a QuickCheck-generated value and produce reproducible drop +-- patterns. Each body is decided independently. +-- +-- Recursive in the same way as 'delayBodies'. +omitBodies + :: forall txid tx m a. + MonadSTM m + => StrictTVar m StdGen + -> Double + -- ^ drop probability for each body, in [0, 1] + -> TxSubmissionClient txid tx m a + -> TxSubmissionClient txid tx m a +omitBodies genVar p (TxSubmissionClient mIdle) = + TxSubmissionClient (wrapIdle <$> mIdle) + where + wrapIdle :: ClientStIdle txid tx m a -> ClientStIdle txid tx m a + wrapIdle ClientStIdle { recvMsgRequestTxIds, recvMsgRequestTxs } = ClientStIdle + { recvMsgRequestTxIds = \blocking ack req -> do + reply <- recvMsgRequestTxIds blocking ack req + pure (wrapTxIds reply) + , recvMsgRequestTxs = \txids -> do + SendMsgReplyTxs txs k <- recvMsgRequestTxs txids + kept <- atomically (filterM (const rollKeep) txs) + pure (SendMsgReplyTxs kept (wrapIdle k)) + } + + wrapTxIds :: ClientStTxIds blocking txid tx m a + -> ClientStTxIds blocking txid tx m a + wrapTxIds (SendMsgReplyTxIds reply k) = SendMsgReplyTxIds reply (wrapIdle k) + wrapTxIds (SendMsgDone a) = SendMsgDone a + + rollKeep = stateTVar genVar $ \g -> + case uniformR (0 :: Double, 1) g of + (x, g') -> (x >= p, g') From 3f0a3ad9df6f145593b3be1696d84333d22b6413 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Tue, 28 Apr 2026 16:38:37 +0200 Subject: [PATCH 45/67] fixup: trace score increase for ommited txs --- .../Ouroboros/Network/TxSubmission/Inbound/V2.hs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs index 3fc0f76efda..39ca5f13c0a 100644 --- 
a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs @@ -318,8 +318,18 @@ txSubmissionInboundV2 throwIO protocolError now <- getMonotonicTime (penaltyCount, peerState') <- applyReceivedTxs now [ (txId tx, tx) | tx <- txs ] peerState - let peerState'' | penaltyCount == 0 = peerState' - | otherwise = snd (State.applyPeerRejections policy now penaltyCount peerState') + peerState'' <- + if penaltyCount == 0 + then pure peerState' + else do + let (score, ps) = State.applyPeerRejections policy now penaltyCount peerState' + traceWith tracer $ + TraceTxSubmissionProcessed ProcessedTxCount { + ptxcAccepted = 0, + ptxcRejected = penaltyCount, + ptxcScore = score + } + pure ps continueWithStateM (continueAfterReplies n) peerState'' -- Partition submitted transactions into accepted and rejected groups From cb2fa16f4caf977402e193924e60c0df85ded782 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Tue, 28 Apr 2026 17:03:23 +0200 Subject: [PATCH 46/67] fixup fixup --- .../Ouroboros/Network/TxSubmission/AppV2.hs | 38 +--------------- .../Network/TxSubmission/Impaired.hs | 43 ++++++++++++++++++- 2 files changed, 43 insertions(+), 38 deletions(-) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs index 4bbe1b517d8..5483ce414b6 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs @@ -59,9 +59,8 @@ import Ouroboros.Network.TxSubmission.Inbound.V2.Types import Ouroboros.Network.TxSubmission.Outbound import Ouroboros.Network.Util.ShowProxy -import System.Random (mkStdGen) - -import Test.Ouroboros.Network.TxSubmission.Impaired (delayBodies, omitBodies) +import Test.Ouroboros.Network.TxSubmission.Impaired (Impairment (..), + applyImpairment, noImpairment) import 
Test.Ouroboros.Network.TxSubmission.TxLogic hiding (tests) import Test.Ouroboros.Network.TxSubmission.Types import Test.Ouroboros.Network.Utils hiding (debugTracer) @@ -100,39 +99,6 @@ data TxSubmissionState = , decisionPolicy :: TxDecisionPolicy } deriving (Show) --- | Behavioural fault injection on a peer's outbound 'TxSubmissionClient'. --- Peers absent from 'peerImpairment' run with no impairment. -data Impairment = Impairment - { impairBodyDelay :: Maybe DiffTime - -- ^ added before each MsgReplyTxs; txid replies are unaffected - , impairOmitProb :: Double - -- ^ per-body Bernoulli drop probability, in [0, 1] - , impairSeed :: Int - -- ^ seed for the per-peer StdGen used by 'omitBodies' - } deriving Show - -noImpairment :: Impairment -noImpairment = Impairment { impairBodyDelay = Nothing - , impairOmitProb = 0 - , impairSeed = 0 - } - --- | Wrap a 'TxSubmissionClient' with the given 'Impairment'. Allocates a --- per-peer 'StdGen' TVar only when the omission rate is non-zero. -applyImpairment :: (MonadDelay m, MonadSTM m) - => Impairment - -> TxSubmissionClient txid tx m a - -> m (TxSubmissionClient txid tx m a) -applyImpairment Impairment { impairBodyDelay, impairOmitProb, impairSeed } c0 = do - c1 <- if impairOmitProb > 0 - then do - genVar <- newTVarIO (mkStdGen impairSeed) - pure (omitBodies genVar impairOmitProb c0) - else pure c0 - pure $ case impairBodyDelay of - Just d -> delayBodies d c1 - Nothing -> c1 - instance Arbitrary TxSubmissionState where arbitrary = do ArbTxDecisionPolicy decisionPolicy <- arbitrary diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Impaired.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Impaired.hs index 8a96533ce68..8facbd7e8d8 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Impaired.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Impaired.hs @@ -13,14 +13,17 @@ module Test.Ouroboros.Network.TxSubmission.Impaired ( 
delayBodies , omitBodies + , Impairment (..) + , noImpairment + , applyImpairment ) where import Control.Concurrent.Class.MonadSTM.Strict (MonadSTM, StrictTVar, - atomically, stateTVar) + atomically, newTVarIO, stateTVar) import Control.Monad (filterM) import Control.Monad.Class.MonadTime.SI (DiffTime) import Control.Monad.Class.MonadTimer.SI (MonadDelay, threadDelay) -import System.Random (StdGen, uniformR) +import System.Random (StdGen, mkStdGen, uniformR) import Ouroboros.Network.Protocol.TxSubmission2.Client @@ -101,3 +104,39 @@ omitBodies genVar p (TxSubmissionClient mIdle) = rollKeep = stateTVar genVar $ \g -> case uniformR (0 :: Double, 1) g of (x, g') -> (x >= p, g') + + +-- | Behavioural fault injection on a peer's outbound 'TxSubmissionClient'. +-- Peers configured with 'noImpairment' run unwrapped. +data Impairment = Impairment + { impairBodyDelay :: Maybe DiffTime + -- ^ added before each MsgReplyTxs; txid replies are unaffected + , impairOmitProb :: Double + -- ^ per-body Bernoulli drop probability, in [0, 1] + , impairSeed :: Int + -- ^ seed for the per-peer StdGen used by 'omitBodies' + } deriving Show + +-- | The neutral impairment: no delay, no omission. Equivalent to running the +-- client unwrapped. +noImpairment :: Impairment +noImpairment = Impairment { impairBodyDelay = Nothing + , impairOmitProb = 0 + , impairSeed = 0 + } + +-- | Wrap a 'TxSubmissionClient' with the given 'Impairment'. Allocates a +-- per-peer 'StdGen' TVar only when the omission rate is non-zero. 
+applyImpairment :: (MonadDelay m, MonadSTM m) + => Impairment + -> TxSubmissionClient txid tx m a + -> m (TxSubmissionClient txid tx m a) +applyImpairment Impairment { impairBodyDelay, impairOmitProb, impairSeed } c0 = do + c1 <- if impairOmitProb > 0 + then do + genVar <- newTVarIO (mkStdGen impairSeed) + pure (omitBodies genVar impairOmitProb c0) + else pure c0 + pure $ case impairBodyDelay of + Just d -> delayBodies d c1 + Nothing -> c1 From 62be0f493e6c21f786b9b734e0c7c6b44096853b Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Thu, 30 Apr 2026 10:30:30 +0200 Subject: [PATCH 47/67] fixup: diffusion level scoring test Add prop_txSubmission_score_impairment which verifies TX submission V2's scoring functionality. --- .../Test/Cardano/Network/Diffusion/Testnet.hs | 284 +++++++++++++++++- .../Diffusion/Testnet/MiniProtocols.hs | 27 +- .../Network/Diffusion/Testnet/Simulation.hs | 16 +- 3 files changed, 300 insertions(+), 27 deletions(-) diff --git a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs index 0bb1222691f..4819585197d 100644 --- a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs +++ b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs @@ -101,6 +101,8 @@ import Simulation.Network.Snocket (BearerInfo (..), noAttenuation) import Test.Cardano.Network.Diffusion.Testnet.ChainedTxs (ChainedPeerTxs (..)) import Test.Cardano.Network.Diffusion.Testnet.ChainedTxs qualified as ChainedTxs import Test.Cardano.Network.Diffusion.Testnet.Simulation +import Test.Ouroboros.Network.TxSubmission.Impaired (Impairment (..), + noImpairment) import Test.Ouroboros.Network.ConnectionManager.Timeouts import Test.Ouroboros.Network.ConnectionManager.Utils @@ -279,6 +281,8 @@ tests = prop_check_inflight_ratio , testProperty "tx chain integrity" prop_txSubmission_chainIntegrity_iosim + , testProperty "score impairment" + 
prop_txSubmission_score_impairment ] , testGroup "Churn" [ testProperty "no timeouts" prop_churn_notimeouts_iosim @@ -460,6 +464,7 @@ unit_cm_valid_transitions = False (Script (PraosFetchMode FetchModeBulkSync :| [PraosFetchMode FetchModeBulkSync])) [] + noImpairment , [JoinNetwork 0.5] ) , ( NodeArgs @@ -501,6 +506,7 @@ unit_cm_valid_transitions = False (Script (PraosFetchMode FetchModeDeadline :| [])) [] + noImpairment , [JoinNetwork 1.484_848_484_848] ) ] @@ -661,7 +667,8 @@ unit_connection_manager_trace_coverage = naChainSyncExitOnBlockNo = Nothing, naChainSyncEarlyExit = False, naFetchModeScript = Script (PraosFetchMode FetchModeDeadline :| []), - naTxs = [] + naTxs = [], + naTxImpairment = noImpairment } , [JoinNetwork 0] @@ -696,7 +703,8 @@ unit_connection_manager_trace_coverage = naChainSyncExitOnBlockNo = Nothing, naChainSyncEarlyExit = False, naFetchModeScript = Script (PraosFetchMode FetchModeDeadline :| []), - naTxs = [] + naTxs = [], + naTxImpairment = noImpairment } , [JoinNetwork 0] ) @@ -785,7 +793,8 @@ unit_connection_manager_transitions_coverage = naChainSyncExitOnBlockNo = Nothing, naChainSyncEarlyExit = False, naFetchModeScript = Script (PraosFetchMode FetchModeDeadline :| []), - naTxs = [] + naTxs = [], + naTxImpairment = noImpairment } , [JoinNetwork 0] ) @@ -819,7 +828,8 @@ unit_connection_manager_transitions_coverage = naChainSyncExitOnBlockNo = Nothing, naChainSyncEarlyExit = False, naFetchModeScript = Script (PraosFetchMode FetchModeDeadline :| []), - naTxs = [] + naTxs = [], + naTxImpairment = noImpairment } , [JoinNetwork 0] ) @@ -960,6 +970,7 @@ prop_txSubmission_allTransactions (ArbTxDecisionPolicy decisionPolicy) False (Script (PraosFetchMode FetchModeDeadline :| [])) uniqueTxsA + noImpairment , [JoinNetwork 0]) , (NodeArgs (-1) @@ -990,6 +1001,7 @@ prop_txSubmission_allTransactions (ArbTxDecisionPolicy decisionPolicy) False (Script (PraosFetchMode FetchModeDeadline :| [])) uniqueTxsB + noImpairment , [JoinNetwork 0]) ] in 
checkAllTransactions (runSimTrace @@ -1034,7 +1046,7 @@ prop_txSubmission_allTransactions (ArbTxDecisionPolicy decisionPolicy) case x of -- When we add txids to the mempool, we collect them -- into the map - DiffusionTxSubmissionInbound (TraceTxInboundAddedToMempool txids _) -> + DiffusionTxSubmissionInbound _ (TraceTxInboundAddedToMempool txids _) -> Map.alter (maybe (Just txids) (Just . sort . (txids ++))) n rr -- if a node would be killed, we could download some txs -- multiple times, but this is not possible in the schedule @@ -1140,6 +1152,7 @@ txChainIntegrityDiffScript (ArbTxDecisionPolicy decisionPolicy) False (Script (PraosFetchMode FetchModeDeadline :| [])) [] + noImpairment , [JoinNetwork 0] ) , ( NodeArgs (-2) @@ -1159,6 +1172,7 @@ txChainIntegrityDiffScript (ArbTxDecisionPolicy decisionPolicy) False (Script (PraosFetchMode FetchModeDeadline :| [])) chainedTxsB + noImpairment , [JoinNetwork 0] ) , ( NodeArgs (-3) @@ -1178,6 +1192,7 @@ txChainIntegrityDiffScript (ArbTxDecisionPolicy decisionPolicy) False (Script (PraosFetchMode FetchModeDeadline :| [])) chainedTxsC + noImpairment , [JoinNetwork 0] ) ] @@ -1227,7 +1242,7 @@ checkTxChainIntegrity (ChainedPeerTxs chainedTxsB chainedTxsC) foldr (\l r -> List.foldl' (\rr (WithName n (WithTime _ x)) -> case x of - DiffusionTxSubmissionInbound (TraceTxInboundAddedToMempool txids _) -> + DiffusionTxSubmissionInbound _ (TraceTxInboundAddedToMempool txids _) -> Map.alter (maybe (Just txids) (Just . sort . (txids ++))) n rr _ -> rr) r l ) Map.empty @@ -1293,6 +1308,232 @@ prop_txSubmission_chainIntegrity_iosim :: ArbTxDecisionPolicy prop_txSubmission_chainIntegrity_iosim = prop_txSubmission_chainIntegrity +-- | Policy used by the score-impairment fixture. Default policy with a +-- guaranteed inflight cap of 2 so the receiver can request bodies from +-- B and C in parallel. 
+txScoreImpairmentPolicy :: TxDecisionPolicy +txScoreImpairmentPolicy = + defaultTxDecisionPolicy { txInflightMultiplicity = 2 } + +-- | Inputs for 'prop_txSubmission_score_impairment'. Body delays are +-- expressed as multipliers of 'interTxSpace' so the test is robust to +-- changes in the policy default. The generator enforces: +-- +-- * 'siiBDelayMul' > 1, so B's lease expires before B delivers, freeing +-- the cap=2 slot for C to claim in parallel. +-- * 'siiCDelayMul' > 'siiBDelayMul', so C's body is consistently later +-- than B's. +data ScoreImpairmentInput = ScoreImpairmentInput + { siiTxCount :: Int + , siiBDelayMul :: Double + , siiCDelayMul :: Double + } deriving Show + +instance Arbitrary ScoreImpairmentInput where + arbitrary = do + n <- choose (1, 20) + bMul <- choose (1.5, 4.0) + cMul <- (bMul +) <$> choose (1.0, 6.0) + pure (ScoreImpairmentInput n bMul cMul) + -- Only shrink the tx count: the delay constraint cMul > bMul > 1 is + -- delicate and shrinking either independently could violate it. + shrink (ScoreImpairmentInput n bMul cMul) = + [ ScoreImpairmentInput n' bMul cMul + | n' <- shrink n, n' >= 1 + ] + +-- | Fixture for the score-impairment test. Topology: 1 receiver (-1) + +-- 2 upstream peers (-2 = B well-behaved, -3 = C delays bodies). Both +-- upstreams carry the same valid tx set so they race to deliver each tx; +-- C's body-delay impairment makes its replies consistently late, exposing +-- the receiver's late-body penalty path. 
+txScoreImpairmentDiffScript :: ScoreImpairmentInput -> DiffusionScript +txScoreImpairmentDiffScript ScoreImpairmentInput { siiTxCount, siiBDelayMul, siiCDelayMul } = + let localRootConfig = LocalRootConfig + DoNotAdvertisePeer + InitiatorAndResponderDiffusionMode + Outbound + IsNotTrustable + + noPeerTargets = PeerSelectionTargets { + targetNumberOfRootPeers = 0, + targetNumberOfKnownPeers = 0, + targetNumberOfEstablishedPeers = 0, + targetNumberOfActivePeers = 0, + targetNumberOfKnownBigLedgerPeers = 0, + targetNumberOfEstablishedBigLedgerPeers = 0, + targetNumberOfActiveBigLedgerPeers = 0 + } + + upstreamTargets = PeerSelectionTargets { + targetNumberOfRootPeers = 1, + targetNumberOfKnownPeers = 1, + targetNumberOfEstablishedPeers = 1, + targetNumberOfActivePeers = 1, + targetNumberOfKnownBigLedgerPeers = 0, + targetNumberOfEstablishedBigLedgerPeers = 0, + targetNumberOfActiveBigLedgerPeers = 0 + } + + validTx i sz = Tx { getTxId = i + , getTxSize = sz + , getTxAdvSize = sz + , getTxValid = True + , getTxParent = Nothing + } + sharedTxs = [ validTx i sz + | (i, sz) <- zip [0 .. 
siiTxCount - 1] (cycle [100, 250, 500]) + ] + + bDelay = realToFrac siiBDelayMul * interTxSpace txScoreImpairmentPolicy + cDelay = realToFrac siiCDelayMul * interTxSpace txScoreImpairmentPolicy + + bImpairment = noImpairment { impairBodyDelay = Just bDelay } + cImpairment = noImpairment { impairBodyDelay = Just cDelay } + + in DiffusionScript + (SimArgs 1 10 txScoreImpairmentPolicy) + (singletonTimedScript Map.empty) + [ ( NodeArgs + (-1) + InitiatorAndResponderDiffusionMode + Map.empty + PraosMode + (Script (DontUseBootstrapPeers :| [])) + (TestAddress (IPAddr (read "0.0.0.0") 0)) + PeerSharingDisabled + [] + (Script (LedgerPools [] :| [])) + (noPeerTargets, noPeerTargets) + (Script (DNSTimeout {getDNSTimeout = 10} :| [])) + (Script (DNSLookupDelay {getDNSLookupDelay = 0} :| [])) + Nothing + False + (Script (PraosFetchMode FetchModeDeadline :| [])) + [] + noImpairment + , [JoinNetwork 0] ) + , ( NodeArgs + (-2) + InitiatorAndResponderDiffusionMode + Map.empty + PraosMode + (Script (DontUseBootstrapPeers :| [])) + (TestAddress (IPAddr (read "0.0.0.1") 0)) + PeerSharingDisabled + [(1, 1, Map.fromList + [(RelayAccessAddress "0.0.0.0" 0, localRootConfig)])] + (Script (LedgerPools [] :| [])) + (upstreamTargets, upstreamTargets) + (Script (DNSTimeout {getDNSTimeout = 10} :| [])) + (Script (DNSLookupDelay {getDNSLookupDelay = 0} :| [])) + Nothing + False + (Script (PraosFetchMode FetchModeDeadline :| [])) + sharedTxs + bImpairment + , [JoinNetwork 0] ) + , ( NodeArgs + (-3) + InitiatorAndResponderDiffusionMode + Map.empty + PraosMode + (Script (DontUseBootstrapPeers :| [])) + (TestAddress (IPAddr (read "0.0.0.2") 0)) + PeerSharingDisabled + [(1, 1, Map.fromList + [(RelayAccessAddress "0.0.0.0" 0, localRootConfig)])] + (Script (LedgerPools [] :| [])) + (upstreamTargets, upstreamTargets) + (Script (DNSTimeout {getDNSTimeout = 10} :| [])) + (Script (DNSLookupDelay {getDNSLookupDelay = 0} :| [])) + Nothing + False + (Script (PraosFetchMode FetchModeDeadline :| [])) + 
sharedTxs + cImpairment + , [JoinNetwork 0] ) + ] + +-- | Score-targeted impairment property. Asserts that the receiver +-- accumulates a higher peak peer-score for the body-delaying upstream +-- (C) than for the well-behaved one (B). With all txs valid and B +-- typically winning the body race, B should accumulate no rejection +-- penalty while C's late deliveries should drive its score up. +prop_txSubmission_score_impairment :: ScoreImpairmentInput -> Property +prop_txSubmission_score_impairment input@ScoreImpairmentInput { siiTxCount, siiBDelayMul, siiCDelayMul } = + let trace = runSimTrace + $ diffusionSimulation noAttenuation + (txScoreImpairmentDiffScript input) + + receiverAddr = TestAddress (IPAddr (read "0.0.0.0") 0) + peerB = TestAddress (IPAddr (read "0.0.0.1") 0) + peerC = TestAddress (IPAddr (read "0.0.0.2") 0) + + scores :: Map NtNAddr Double + scores = + Map.fromListWith max + [ (peer, ptxcScore ptxc) + | WithTime _ (WithName receiver ev) <- + Trace.toList + . withTimeNameTraceEvents @DiffusionTestTrace @NtNAddr + . Trace.take 500_000 + $ trace + , receiver == receiverAddr + , DiffusionTxSubmissionInbound peer (TraceTxSubmissionProcessed ptxc) <- [ev] + ] + + scoreB = Map.findWithDefault 0 peerB scores + scoreC = Map.findWithDefault 0 peerC scores + + allEvents :: [(NtNAddr, NtNAddr, TraceTxSubmissionInbound Int (Tx Int))] + allEvents = + [ (receiver, peer, tr) + | WithTime _ (WithName receiver ev) <- + Trace.toList + . withTimeNameTraceEvents @DiffusionTestTrace @NtNAddr + . Trace.take 500_000 + $ trace + , DiffusionTxSubmissionInbound peer tr <- [ev] + ] + + -- Count txs each upstream successfully delivered into the receiver's + -- mempool, by tagged peer of the corresponding inbound connection. + -- This is the behavioural test of the score subsystem: a node should + -- prefer peers that deliver valid txs quickly, so B (well-behaved) + -- should outperform C (delayed) in the race to land bodies first. 
+ deliveredByPeer :: Map NtNAddr Int + deliveredByPeer = + Map.fromListWith (+) + [ (peer, length txids) + | (receiver, peer, TraceTxInboundAddedToMempool txids _) <- allEvents + , receiver == receiverAddr + ] + + deliveredB = Map.findWithDefault 0 peerB deliveredByPeer + deliveredC = Map.findWithDefault 0 peerC deliveredByPeer + + in counterexample ("inbound events seen: " ++ show (length allEvents)) + . counterexample ("scores: " ++ show scores) + . counterexample ("scoreB=" ++ show scoreB ++ " scoreC=" ++ show scoreC) + . counterexample ("deliveredB=" ++ show deliveredB + ++ " deliveredC=" ++ show deliveredC) + . tabulate "tx count" [show siiTxCount] + . tabulate "B delay (xITS)" [show (round siiBDelayMul :: Int)] + . tabulate "C delay (xITS)" [show (round siiCDelayMul :: Int)] + . tabulate "C/B delay ratio" [show (round (siiCDelayMul / siiBDelayMul) :: Int)] + $ conjoin + [ counterexample "B must not accumulate any penalty" + $ scoreB == 0 + , counterexample "C must accumulate a penalty" + $ scoreC > 0 + , counterexample "C's score must stay within scoreMax" + $ scoreC <= scoreMax txScoreImpairmentPolicy + , counterexample "B should deliver strictly more txs than C" + $ deliveredB > deliveredC + ] + + -- | This test checks the ratio of the inflight txs against the allowed by the -- TxDecisionPolicy. 
-- @@ -1705,6 +1946,7 @@ unit_4177 = prop_inbound_governor_transitions_coverage absNoAttenuation script False (Script (PraosFetchMode FetchModeDeadline :| [])) [] + noImpairment , [JoinNetwork 1.742_857_142_857 ,Reconfigure 6.333_333_333_33 [(1,1,Map.fromList [(RelayAccessDomain "test2" 65_535,LocalRootConfig DoAdvertisePeer InitiatorAndResponderDiffusionMode Outbound IsNotTrustable)]), (1,1,Map.fromList [(RelayAccessAddress "0:6:0:3:0:6:0:5" 65_530,LocalRootConfig DoAdvertisePeer InitiatorAndResponderDiffusionMode Outbound IsNotTrustable) @@ -1739,6 +1981,7 @@ unit_4177 = prop_inbound_governor_transitions_coverage absNoAttenuation script False (Script (PraosFetchMode FetchModeDeadline :| [])) [] + noImpairment , [JoinNetwork 0.183_783_783_783 ,Reconfigure 4.533_333_333_333 [(1,1,Map.empty)] ] @@ -2402,6 +2645,7 @@ unit_4191 = testWithIOSim prop_diffusion_dns_can_recover long_trace absInfo scri False (Script (PraosFetchMode FetchModeDeadline :| [])) [] + noImpairment , [ JoinNetwork 6.710_144_927_536 , Kill 7.454_545_454_545 , JoinNetwork 10.763_157_894_736 @@ -2490,7 +2734,8 @@ prop_connect_failure (AbsIOError ioerr) = naChainSyncExitOnBlockNo = Nothing, naChainSyncEarlyExit = False, naFetchModeScript = Script (PraosFetchMode FetchModeDeadline :| []), - naTxs = [] + naTxs = [], + naTxImpairment = noImpairment } , [JoinNetwork 10] ), @@ -2519,7 +2764,8 @@ prop_connect_failure (AbsIOError ioerr) = naChainSyncExitOnBlockNo = Nothing, naChainSyncEarlyExit = False, naFetchModeScript = Script (PraosFetchMode FetchModeDeadline :| []), - naTxs = [] + naTxs = [], + naTxImpairment = noImpairment } , [JoinNetwork 0] ) @@ -2618,7 +2864,8 @@ prop_accept_failure (AbsIOError ioerr) = naChainSyncExitOnBlockNo = Nothing, naChainSyncEarlyExit = False, naFetchModeScript = Script (PraosFetchMode FetchModeDeadline :| []), - naTxs = [] + naTxs = [], + naTxImpairment = noImpairment } , [JoinNetwork 10] ), @@ -2647,7 +2894,8 @@ prop_accept_failure (AbsIOError ioerr) = 
naChainSyncExitOnBlockNo = Nothing, naChainSyncEarlyExit = False, naFetchModeScript = Script (PraosFetchMode FetchModeDeadline :| []), - naTxs = [] + naTxs = [], + naTxImpairment = noImpairment } , [JoinNetwork 0] ) @@ -3734,7 +3982,8 @@ async_demotion_network_script = = False, naPeerSharing = PeerSharingDisabled, naFetchModeScript = singletonScript (PraosFetchMode FetchModeDeadline), - naTxs = [] + naTxs = [], + naTxImpairment = noImpairment } @@ -4301,6 +4550,7 @@ prop_unit_4258 = False (Script (PraosFetchMode FetchModeDeadline :| [])) [] + noImpairment , [ JoinNetwork 4.166_666_666_666, Kill 0.3, JoinNetwork 1.517_857_142_857, @@ -4344,6 +4594,7 @@ prop_unit_4258 = False (Script (PraosFetchMode FetchModeDeadline :| [])) [] + noImpairment , [ JoinNetwork 3.384_615_384_615, Reconfigure 3.583_333_333_333 [(1,1,Map.fromList [(RelayAccessAddress "0.0.0.4" 9,LocalRootConfig DoNotAdvertisePeer InitiatorAndResponderDiffusionMode Outbound IsNotTrustable)])], Kill 15.555_555_555_55, @@ -4407,6 +4658,7 @@ prop_unit_reconnect = False (Script (PraosFetchMode FetchModeDeadline :| [])) [] + noImpairment , [ JoinNetwork 0 ]) , (NodeArgs @@ -4435,6 +4687,7 @@ prop_unit_reconnect = False (Script (PraosFetchMode FetchModeDeadline :| [])) [] + noImpairment , [ JoinNetwork 10 ]) ] @@ -4860,7 +5113,8 @@ unit_peer_sharing = naChainSyncExitOnBlockNo = Nothing, naFetchModeScript = singletonScript (PraosFetchMode FetchModeDeadline), naConsensusMode, - naTxs = [] + naTxs = [], + naTxImpairment = noImpairment } script = DiffusionScript @@ -5570,7 +5824,8 @@ unit_local_root_diffusion_mode diffusionMode = naChainSyncExitOnBlockNo = Nothing, naChainSyncEarlyExit = False, naFetchModeScript = Script (PraosFetchMode FetchModeDeadline :| []), - naTxs = [] + naTxs = [], + naTxImpairment = noImpairment } , [JoinNetwork 0] ) @@ -5604,7 +5859,8 @@ unit_local_root_diffusion_mode diffusionMode = naChainSyncExitOnBlockNo = Nothing, naChainSyncEarlyExit = False, naFetchModeScript = Script (PraosFetchMode 
FetchModeDeadline :| []), - naTxs = [] + naTxs = [], + naTxImpairment = noImpairment } , [JoinNetwork 0] ) diff --git a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs index 8a9101196e0..34657f7414c 100644 --- a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs +++ b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs @@ -116,6 +116,8 @@ import Ouroboros.Network.TxSubmission.Outbound (txSubmissionOutbound) import Ouroboros.Network.Util.ShowProxy import Test.Ouroboros.Network.Diffusion.Node.Kernel +import Test.Ouroboros.Network.TxSubmission.Impaired (Impairment, + applyImpairment) import Test.Ouroboros.Network.TxSubmission.Types (Mempool, Tx (..), TxId, getMempoolReader, getMempoolWriter, txSubmissionCodec2) @@ -241,6 +243,9 @@ data AppArgs header block m = AppArgs , aaPeerMetrics :: PeerMetrics m NtNAddr , aaTxDecisionPolicy :: TxDecisionPolicy + , aaTxImpairment :: Impairment + -- ^ behavioural fault injection on this node's outbound + -- 'TxSubmissionClient' (default: 'noImpairment') } @@ -269,7 +274,9 @@ applications :: forall block header s m. 
, RandomGen s ) => Tracer m String - -> Tracer m (TraceTxSubmissionInbound Int (Tx Int)) + -> Tracer m (NtNAddr, TraceTxSubmissionInbound Int (Tx Int)) + -- ^ tagged with the remote peer address so per-peer scoring + -- events can be folded out of the trace -> Tracer m (TraceTxLogic NtNAddr Int (Tx Int)) -> NodeKernel header block s Int m -> Codecs NtNAddr header block m @@ -298,6 +305,7 @@ applications debugTracer txSubmissionInboundTracer _txSubmissionInboundDebug nod , aaPeerSharing , aaPeerMetrics , aaTxDecisionPolicy + , aaTxImpairment } toHeader duplicateTxVar = @@ -696,13 +704,14 @@ applications debugTracer txSubmissionInboundTracer _txSubmissionInboundDebug nod } channel -> do - let client = txSubmissionOutbound - (((ppNtNConnId connId ++) . (" " ++) . show) `contramap` debugTracer) - (NumTxIdsToAck $ getNumTxIdsToReq - $ maxUnacknowledgedTxIds txDecisionPolicy) - (getMempoolReader mempool) - (maxBound :: UnversionedProtocol) - controlMessageSTM + let baseClient = txSubmissionOutbound + (((ppNtNConnId connId ++) . (" " ++) . show) `contramap` debugTracer) + (NumTxIdsToAck $ getNumTxIdsToReq + $ maxUnacknowledgedTxIds txDecisionPolicy) + (getMempoolReader mempool) + (maxBound :: UnversionedProtocol) + controlMessageSTM + client <- applyImpairment aaTxImpairment baseClient labelThisThread "TxSubmissionClient" runPeerWithLimits (((ppNtNConnId connId ++) . (" " ++) . 
show) `contramap` debugTracer) @@ -727,7 +736,7 @@ applications debugTracer txSubmissionInboundTracer _txSubmissionInboundDebug nod txCountersVar them $ \api -> do let server = txSubmissionInboundV2 - txSubmissionInboundTracer + ((them,) `contramap` txSubmissionInboundTracer) NoTxSubmissionInitDelay aaTxDecisionPolicy (getMempoolWriter duplicateTxVar mempool) diff --git a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/Simulation.hs b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/Simulation.hs index 957bd854aca..934e0f2e90b 100644 --- a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/Simulation.hs +++ b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/Simulation.hs @@ -146,6 +146,7 @@ import Test.Ouroboros.Network.PeerSelection.RootPeersDNS (DNSLookupDelay (..), DNSTimeout (..), DomainAccessPoint (..), MockDNSMap, genDomainName) import Test.Ouroboros.Network.PeerSelection.RootPeersDNS qualified as PeerSelection hiding (tests) +import Test.Ouroboros.Network.TxSubmission.Impaired (Impairment, noImpairment) import Test.Ouroboros.Network.TxSubmission.TxLogic (ArbTxDecisionPolicy (..)) import Test.Ouroboros.Network.TxSubmission.Types (Tx (..)) import Test.Ouroboros.Network.Utils @@ -238,6 +239,9 @@ data NodeArgs = , naChainSyncEarlyExit :: Bool , naFetchModeScript :: Script FetchMode , naTxs :: [Tx Int] + , naTxImpairment :: Impairment + -- ^ behavioural fault injection on this node's outbound + -- 'TxSubmissionClient' (default: 'noImpairment') } instance Show NodeArgs where @@ -246,7 +250,7 @@ instance Show NodeArgs where naLocalRootPeers, naPeerTargets, naDNSTimeoutScript, naDNSLookupDelayScript, naChainSyncExitOnBlockNo, naChainSyncEarlyExit, naFetchModeScript, naConsensusMode, - naTxs } = + naTxs, naTxImpairment } = unwords [ "NodeArgs" , "(" ++ show naSeed ++ ")" , show naDiffusionMode @@ -264,6 +268,7 @@ instance Show NodeArgs where , show naChainSyncEarlyExit , "(" ++ show 
naFetchModeScript ++ ")" , show naTxs + , "(" ++ show naTxImpairment ++ ")" ] data Command = JoinNetwork DiffTime @@ -478,6 +483,7 @@ genNodeArgs relays minConnected localRootPeers self txs = flip suchThat hasUpstr , naPeerSharing = peerSharing , naFetchModeScript = fetchModeScript , naTxs = txs + , naTxImpairment = noImpairment } where makeRelayAccessPoint (relay, _, _, _) = relay @@ -970,7 +976,7 @@ data DiffusionTestTrace = | DiffusionServerTrace (Server.Trace NtNAddr) | DiffusionFetchTrace (TraceFetchClientState BlockHeader) | DiffusionChurnModeTrace TraceChurnMode - | DiffusionTxSubmissionInbound (TraceTxSubmissionInbound Int (Tx Int)) + | DiffusionTxSubmissionInbound NtNAddr (TraceTxSubmissionInbound Int (Tx Int)) | DiffusionTxLogic (TraceTxLogic NtNAddr Int (Tx Int)) | DiffusionDebugTrace String | DiffusionDNSTrace DNSTrace @@ -993,7 +999,7 @@ ppDiffusionTestTrace (DiffusionInboundGovernorTransitionTrace tr) = show tr ppDiffusionTestTrace (DiffusionServerTrace tr) = show tr ppDiffusionTestTrace (DiffusionFetchTrace tr) = show tr ppDiffusionTestTrace (DiffusionChurnModeTrace tr) = show tr -ppDiffusionTestTrace (DiffusionTxSubmissionInbound tr) = show tr +ppDiffusionTestTrace (DiffusionTxSubmissionInbound peer tr) = ppNtNAddr peer ++ " " ++ show tr ppDiffusionTestTrace (DiffusionTxLogic tr) = show tr ppDiffusionTestTrace (DiffusionDebugTrace tr) = tr ppDiffusionTestTrace (DiffusionDNSTrace tr) = show tr @@ -1178,6 +1184,7 @@ diffusionSimulationM , naPeerSharing = peerSharing , naDiffusionMode = diffusionMode , naTxs = txs + , naTxImpairment = txImpairment } ntnSnocket ntcSnocket @@ -1365,7 +1372,7 @@ diffusionSimulationM duplicateTxVar where tracerTxSubmissionInbound = - contramap DiffusionTxSubmissionInbound + contramap (uncurry DiffusionTxSubmissionInbound) . tracerWithName addr . 
tracerWithTime $ nodeTracer @@ -1382,6 +1389,7 @@ diffusionSimulationM , Node.aaPeerSharing = Node.aPeerSharing arguments , Node.aaPeerMetrics = peerMetrics , Node.aaTxDecisionPolicy = Node.aTxDecisionPolicy arguments + , Node.aaTxImpairment = txImpairment } Node.run From 772baf6d0e2ce68e3759b630104f947b5f7e253d Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Thu, 30 Apr 2026 10:32:10 +0200 Subject: [PATCH 48/67] fixup: scoring unit tests --- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 1173d692d3e..b0e545e2a56 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -70,6 +70,8 @@ tests = , testProperty "nextPeerAction claims claimable tx for best idle advertiser" prop_nextPeerAction_claimsClaimableTx , testProperty "nextPeerAction claims a released tx from another advertiser" prop_nextPeerAction_claimsRejectedTxFromOtherAdvertiser , testCaseSteps "nextPeerAction claims a tx once the score delay threshold has elapsed" unit_nextPeerAction_claimsAtScoreDelayThreshold + , testCaseSteps "peerScore decays linearly over time at scoreRate" unit_peerScore_decaysOverTime + , testCaseSteps "applyPeerRejections drains the existing score before adding the new rejection count" unit_applyPeerRejections_drainsThenAdds , testProperty "nextPeerAction steals expired lease for best idle advertiser" prop_nextPeerAction_claimsExpiredLease , testProperty "nextPeerAction requests an oversized first tx within the soft budget" prop_nextPeerAction_requestsOversizedFirstTx , testCaseSteps "nextPeerAction skips blocked available txs and requests later claimable ones" unit_nextPeerAction_skipsBlockedAvailableTxs @@ -1553,6 +1555,32 @@ 
prop_nextPeerAction_claimsClaimableTx (ArbTxDecisionPolicy policy) (Positive pee } (peerAction, peerState', sharedState') = nextPeerAction now policy peerA peerState0 sharedState0 +-- | A peer's score decays linearly at 'scoreRate' from its last +-- timestamped value, clamped to zero. +unit_peerScore_decaysOverTime :: (String -> IO ()) -> Assertion +unit_peerScore_decaysOverTime step = do + step "After 50 seconds at scoreRate 0.1 a score of 10 should drain to 5" + currentPeerScore policy (Time 50) score0 @?= 5 + step "After 200 seconds the score should be fully decayed (clamped to 0)" + currentPeerScore policy (Time 200) score0 @?= 0 + step "Reading at the same instant as the last update returns the unchanged value" + currentPeerScore policy (Time 0) score0 @?= peerScoreValue score0 + where + policy = defaultTxDecisionPolicy + score0 = PeerScore { peerScoreValue = 10, peerScoreTs = Time 0 } + +-- | A new rejection drains the existing score at 'scoreRate' per second +-- since its last update before adding the rejection count. 
+unit_applyPeerRejections_drainsThenAdds :: (String -> IO ()) -> Assertion +unit_applyPeerRejections_drainsThenAdds step = do + step "Starting score 10 at Time 0; one rejection 50s later: 10 - (50 * 0.1) + 1 = 6" + fst (applyPeerRejections policy (Time 50) 1 peerState0) @?= 6 + where + policy = defaultTxDecisionPolicy + peerState0 = emptyPeerTxLocalState + { peerScore = PeerScore { peerScoreValue = 10 + , peerScoreTs = Time 0 } } + unit_nextPeerAction_claimsAtScoreDelayThreshold :: (String -> IO ()) -> Assertion unit_nextPeerAction_claimsAtScoreDelayThreshold step = do step "Run nextPeerAction for a peer whose score contributes exactly a 1 ms claim delay" From b88951040d257056c27cc44a050a8b51623ef8d9 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Thu, 30 Apr 2026 12:13:41 +0200 Subject: [PATCH 49/67] fixup: improve prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected --- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 153 +++++++++++++----- 1 file changed, 110 insertions(+), 43 deletions(-) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index b0e545e2a56..0803e81c7fc 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -427,11 +427,11 @@ instance Arbitrary RequestedFate where ] rfInReply :: RequestedFate -> Bool -rfInReply RfBuffered{} = True -rfInReply RfOmitted{} = False -rfInReply RfLateRetained = True -rfInReply RfLateMempool = True -rfInReply RfOmittedPruned = False +rfInReply RfBuffered{} = True +rfInReply RfOmitted{} = False +rfInReply RfLateRetained = True +rfInReply RfLateMempool = True +rfInReply RfOmittedPruned = False rfCoAdvertised :: RequestedFate -> Bool rfCoAdvertised (RfBuffered c) = c @@ -1399,56 +1399,123 @@ prop_handleReceivedTxs ] -- Verifies that handleSubmittedTxs retains accepted txs and removes 
rejected --- txs from the active table and tx-key maps. +-- txs from the active table and tx-key maps. Generated over a non-empty +-- list of (txid, size, accepted-flag, co-advertised-flag): the +-- accepted-flag controls accept vs reject; the co-advertised-flag adds a +-- second peer as advertiser, so rejected co-advertised txs stay in +-- 'sharedTxTable' (only the calling peer's advertisement is removed) +-- while rejected solo txs are dropped from all maps. prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected :: ArbTxDecisionPolicy -> Positive Int - -> TxId - -> TxId - -> Positive Int - -> Positive Int + -> NonEmptyList (TxId, Positive Int, Bool, Bool) -> Property -prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected (ArbTxDecisionPolicy policy) (Positive peeraddr) txidA0 txidB0 txSizeA0 txSizeB0 = - txidA /= txidB ==> - conjoin +prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected + (ArbTxDecisionPolicy policy) + (Positive peeraddr) + (NonEmpty rawEntries) = + tabulate "accepted count" [bucket (length acceptedKeys)] + . tabulate "rejected count" [bucket (length rejectedKeys)] + . 
tabulate "co-advertised count" [bucket (length [() | (_, _, _, True) <- entries])] + $ conjoin $ [ peerDownloadedTxs peerState' === IntMap.empty - , IntMap.lookup kA (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) - , retainedLookup kA (sharedRetainedTxs sharedState') === Just expectedRetainUntil - , Map.lookup (getRawTxId txidA) (sharedTxIdToKey sharedState') === Just keyA - , IntMap.lookup kA (sharedKeyToTxId sharedState') === Just txidA - , IntMap.lookup kB (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) - , retainedLookup kB (sharedRetainedTxs sharedState') === Nothing - , Map.lookup (getRawTxId txidB) (sharedTxIdToKey sharedState') === Nothing - , IntMap.lookup kB (sharedKeyToTxId sharedState') === Nothing - , sharedGeneration sharedState' === 1 + , sharedGeneration sharedState' === sharedGeneration sharedState0 + 1 , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) , checkNoThunks "sharedState'" sharedState' ] + ++ + [ counterexample ("accepted txid=" ++ show txid) (acceptedAssertions txid) + | (txid, _, True, _) <- entries + ] + ++ + [ counterexample ("rejected solo txid=" ++ show txid) (rejectedSoloAssertions txid) + | (txid, _, False, False) <- entries + ] + ++ + [ counterexample ("rejected co-advertised txid=" ++ show txid) (rejectedCoAdvAssertions txid) + | (txid, _, False, True) <- entries + ] where - txidA = abs txidA0 + 1 - txidB = abs txidB0 + 2 - txSizeA = mkSize txSizeA0 - txSizeB = mkSize txSizeB0 - txA = mkTx txidA txSizeA - txB = mkTx txidB txSizeB + -- Use a distinct address as the second advertiser. Adding @peeraddr + 1@ + -- guarantees they don't clash even if QC produces consecutive ids. + otherPeer = peeraddr + 1 + + -- Normalise: shift txids to >= 1, convert sizes, dedupe by shifted txid + -- (first occurrence wins). NonEmpty input ensures at least one entry + -- survives. 
+ entries :: [(TxId, SizeInBytes, Bool, Bool)] + entries = nubBy ((==) `on` (\(t, _, _, _) -> t)) + $ map (\(t, sz, acc, co) -> (abs t + 1, mkSize sz, acc, co)) rawEntries + sharedState0 = - let st = mkSharedState [txidA, txidB] - keyA' = lookupKeyOrFail txidA st - keyB' = lookupKeyOrFail txidB st in - ensurePeerAdvertisesTxKeys peeraddr [keyA', keyB'] $ - st { - sharedTxTable = IntMap.fromList - [ (unTxKey keyA', mkTxEntry peeraddr txSizeA (Just TxBuffered) policy) - , (unTxKey keyB', mkTxEntry peeraddr txSizeB (Just TxBuffered) policy) - ] + let st = mkSharedState [ txid | (txid, _, _, _) <- entries ] + peeraddrKeys = [ lookupKeyOrFail txid st | (txid, _, _, _) <- entries ] + otherPeerKeys = [ lookupKeyOrFail txid st | (txid, _, _, True) <- entries ] in + ensurePeerAdvertisesTxKeys otherPeer otherPeerKeys + $ ensurePeerAdvertisesTxKeys peeraddr peeraddrKeys + $ st { sharedTxTable = IntMap.fromList + [ (unTxKey (lookupKeyOrFail txid st), + (mkTxEntry peeraddr sz (Just TxBuffered) policy) + { txAdvertiserCount = if co then 2 else 1 }) + | (txid, sz, _, co) <- entries + ] } - keyA = lookupKeyOrFail txidA sharedState0 - keyB = lookupKeyOrFail txidB sharedState0 - kA = unTxKey keyA - kB = unTxKey keyB - peerState0 = emptyPeerTxLocalState { peerDownloadedTxs = IntMap.fromList [(kA, txA), (kB, txB)] } + + keyOf txid = lookupKeyOrFail txid sharedState0 + kOf = unTxKey . 
keyOf + + acceptedKeys = [ keyOf txid | (txid, _, True, _) <- entries ] + rejectedKeys = [ keyOf txid | (txid, _, False, _) <- entries ] + + peerState0 = emptyPeerTxLocalState + { peerDownloadedTxs = IntMap.fromList + [ (kOf txid, mkTx txid sz) + | (txid, sz, _, _) <- entries + ] + } + expectedRetainUntil = addTime (bufferedTxsMinLifetime policy) now - (peerState', sharedState') = handleSubmittedTxs now policy peeraddr [keyA] [keyB] peerState0 sharedState0 + + (peerState', sharedState') = + handleSubmittedTxs now policy peeraddr acceptedKeys rejectedKeys + peerState0 sharedState0 + + advertisedKeysOf peer st = + sharedPeerAdvertisedTxKeys (lookupPeerOrFail peer st) + + acceptedAssertions txid = + let k = kOf txid + key = keyOf txid in + conjoin + [ IntMap.lookup k (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) + , retainedLookup k (sharedRetainedTxs sharedState') === Just expectedRetainUntil + , Map.lookup (getRawTxId txid) (sharedTxIdToKey sharedState') === Just key + , IntMap.lookup k (sharedKeyToTxId sharedState') === Just txid + ] + + rejectedSoloAssertions txid = + let k = kOf txid in + conjoin + [ IntMap.lookup k (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) + , retainedLookup k (sharedRetainedTxs sharedState') === Nothing + , Map.lookup (getRawTxId txid) (sharedTxIdToKey sharedState') === Nothing + , IntMap.lookup k (sharedKeyToTxId sharedState') === Nothing + ] + + rejectedCoAdvAssertions txid = + let k = kOf txid + key = keyOf txid in + conjoin + [ counterexample "entry should remain in sharedTxTable" + $ isJust (IntMap.lookup k (sharedTxTable sharedState')) + , counterexample "this peer's advertisement should be removed" + $ not (IntSet.member k (advertisedKeysOf peeraddr sharedState')) + , counterexample "co-advertiser's advertisement should remain" + $ IntSet.member k (advertisedKeysOf otherPeer sharedState') + , Map.lookup (getRawTxId txid) (sharedTxIdToKey sharedState') === Just key + , IntMap.lookup k 
(sharedKeyToTxId sharedState') === Just txid + , retainedLookup k (sharedRetainedTxs sharedState') === Nothing + ] -- Verifies that nextPeerAction submits buffered txs owned by the peer before -- taking any other action. From 226d3daccc1b2fb66029f208d33bc46b6b5fdc33 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 1 May 2026 09:42:26 +0200 Subject: [PATCH 50/67] fixup: prop_nextPeerAction_processesAllTriggers test Test nextPeerAction but running it through a series of triggers for a small group of peers. --- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 683 +++++++++++++++++- 1 file changed, 678 insertions(+), 5 deletions(-) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 0803e81c7fc..5fedb7bcf99 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -32,9 +32,9 @@ import Data.Foldable (foldl', toList) import Data.Function (on) import Data.IntMap.Strict qualified as IntMap import Data.IntSet qualified as IntSet -import Data.List (nub, nubBy) +import Data.List (mapAccumL, nub, nubBy, sortBy) import Data.Map.Strict qualified as Map -import Data.Maybe (isJust) +import Data.Maybe (isJust, listToMaybe) import Data.Sequence.Strict qualified as StrictSeq import Data.Set qualified as Set import Data.Word (Word64) @@ -53,20 +53,32 @@ import Ouroboros.Network.TxSubmission.Inbound.V2.Types import Test.Ouroboros.Network.TxSubmission.Types import Test.QuickCheck -import Test.Tasty (TestTree, testGroup) +import Test.Tasty (TestTree, localOption, testGroup) import Test.Tasty.HUnit (Assertion, assertBool, assertFailure, testCaseSteps, (@?=)) -import Test.Tasty.QuickCheck (testProperty) +import Test.Tasty.QuickCheck (QuickCheckTests (..), testProperty) tests :: TestTree tests = testGroup "TxLogic" - [ testProperty "handleReceivedTxIds 
handles mixed new / retained / mempool txids" prop_handleReceivedTxIds + [ localOption (QuickCheckTests 50) $ + testGroup "TriggerScenario meta-tests" + [ testProperty "generated scenario produces a valid initial state" + prop_TriggerScenario_validInitialState + , testProperty "shrink preserves validity" + prop_TriggerScenario_shrinkPreservesValidity + , testProperty "shrink does not grow the trigger list" + prop_TriggerScenario_shrinkSmaller + , testProperty "shrink does not contain the original value" + prop_TriggerScenario_shrinkExcludesOriginal + ] + , testProperty "handleReceivedTxIds handles mixed new / retained / mempool txids" prop_handleReceivedTxIds , testCaseSteps "handleReceivedTxIds adds the current peer as an advertiser for active txs" unit_handleReceivedTxIds_addsAdvertiserForActiveTxs , testCaseSteps "nextPeerAction lets another peer claim a fresh tx when the first advertiser is full" unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull , testProperty "handleReceivedTxs handles mixed buffered / omitted / late-retained / late-mempool / pruned txids" prop_handleReceivedTxs , testProperty "handleSubmittedTxs retains accepted and drops rejected" prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected , testProperty "nextPeerAction prioritises submitting buffered owned txs" prop_nextPeerAction_prioritisesSubmit + , testProperty "nextPeerAction processes all multi-peer triggers" prop_nextPeerAction_processesAllTriggers , testProperty "nextPeerAction claims claimable tx for best idle advertiser" prop_nextPeerAction_claimsClaimableTx , testProperty "nextPeerAction claims a released tx from another advertiser" prop_nextPeerAction_claimsRejectedTxFromOtherAdvertiser , testCaseSteps "nextPeerAction claims a tx once the score delay threshold has elapsed" unit_nextPeerAction_claimsAtScoreDelayThreshold @@ -1565,6 +1577,667 @@ prop_nextPeerAction_prioritisesSubmit (ArbTxDecisionPolicy policy) (Positive pee } (peerAction, peerState', sharedState') = 
nextPeerAction now policy peeraddr peerState0 sharedState0 +-- | Trigger kinds for the model-based 'nextPeerAction' property. Each +-- describes one action that should be choosable at the moment the test +-- calls 'nextPeerAction'. The state-builder turns the list into a +-- consistent ('PeerTxLocalState', 'SharedTxState') pair. +data ActionTrigger + = TSubmittable TxId (Positive Int) -- buffered + owned + body-downloaded + | TFetchable TxId (Positive Int) -- claimable + advertised + in available + | TAckable TxId -- in 'sharedRetainedTxs' + unacked queue + | TFetchableLater (Positive Int) TxId (Positive Int) + -- ^ delay-in-seconds + txid + size: claimable only after the loop has + -- advanced 'time' by at least the delay; otherwise just like + -- 'TFetchable'. + deriving (Eq, Show) + +instance Arbitrary ActionTrigger where + arbitrary = oneof + [ TSubmittable <$> arbitrary <*> arbitrary + , TFetchable <$> arbitrary <*> arbitrary + , TAckable <$> arbitrary + , TFetchableLater <$> arbitrary <*> arbitrary <*> arbitrary + ] + -- Shrink only by demoting to a simpler trigger constructor and at most + -- one numeric-field alternative. Avoids the combinatorial blow-up that + -- recursive 'shrink' on every numeric field produces, which made QC's + -- shrinker hundreds of steps slow on rich trigger lists. 
+ shrink (TSubmittable t s) = + [ TFetchable t s, TAckable t ] + ++ [ TSubmittable t' s | t' <- take 1 (shrink t) ] + shrink (TFetchable t s) = + TAckable t : + [ TFetchable t' s | t' <- take 1 (shrink t) ] + shrink (TAckable t) = + [ TAckable t' | t' <- take 1 (shrink t) ] + shrink (TFetchableLater d t s) = + [ TFetchable t s, TAckable t ] -- demote: collapse delay + ++ [ TFetchableLater d' t s | d' <- take 1 (shrink d) ] + +triggerTxid :: ActionTrigger -> TxId +triggerTxid (TSubmittable t _) = t +triggerTxid (TFetchable t _) = t +triggerTxid (TAckable t) = t +triggerTxid (TFetchableLater _ t _) = t + +isTSubmittable :: ActionTrigger -> Bool +isTSubmittable TSubmittable{} = True +isTSubmittable _ = False + +isTAckable :: ActionTrigger -> Bool +isTAckable TAckable{} = True +isTAckable _ = False + +isTFetchableNow :: ActionTrigger -> Bool +isTFetchableNow TFetchable{} = True +isTFetchableNow _ = False + +isTFetchableLater :: ActionTrigger -> Bool +isTFetchableLater TFetchableLater{} = True +isTFetchableLater _ = False + +-- | Generator-time bias selector: 'ModeDisjoint' renumbers all triggers +-- so each peer's txid range is unique (no cross-peer overlap), +-- 'ModeShared' collapses txids into a small shared pool so cross-peer +-- overlap arises with high probability. Both modes are sampled so +-- single-peer-shape regressions and cross-peer-guard regressions are +-- both reachable from the corpus. +data OverlapMode = ModeDisjoint | ModeShared + deriving (Eq, Show) + +-- | A scenario is an overlap mode plus a per-peer trigger map. With 1-3 +-- peers, scenarios cover both single-peer behaviour and the cross-peer +-- code paths in 'nextPeerAction' (lease contention, +-- 'txSubmittingAnywhere', advertiser bookkeeping) that the single-peer +-- model could not reach. +data TriggerScenario = + TriggerScenario OverlapMode (Map.Map PeerAddr [ActionTrigger]) + deriving (Eq, Show) + +-- | Per-peer trigger-list generator. 
Singleton is common; empty and +-- large lists are explicitly represented; the per-element generator +-- biases toward uniform-of-one-class so all-fetchable / all-ackable +-- scenarios arise often enough to exercise their code paths. +genPerPeerTriggers :: Gen [ActionTrigger] +genPerPeerTriggers = do + size <- frequency + [ (2, pure 1) -- singleton + , (1, pure 0) -- empty + , (3, choose (2, 10)) -- small + , (1, choose (11, 100)) -- larger + ] + genElem <- oneof + [ pure arbitrary + , pure (TFetchable <$> arbitrary <*> arbitrary) + , pure (TSubmittable <$> arbitrary <*> arbitrary) + , pure (TAckable <$> arbitrary) + , pure (TFetchableLater <$> arbitrary <*> arbitrary <*> arbitrary) + ] + vectorOf size genElem + +-- | Size-first shrink for a per-peer trigger list: halve, drop one, +-- element-shrink. Same behaviour as the original 'shrink' for +-- 'TriggerScenario' before the multi-peer refactor. +shrinkTriggerList :: [ActionTrigger] -> [[ActionTrigger]] +shrinkTriggerList ts = + let n = length ts in + [ take (n `div` 2) ts | n >= 2 ] + ++ [ drop (n `div` 2) ts | n >= 2 ] + ++ [ take i ts ++ drop (i + 1) ts | i <- [0 .. n - 1] ] + ++ [ take i ts ++ t' : drop (i + 1) ts + | i <- [0 .. n - 1] + , t' <- shrink (ts !! i) + ] + +-- | Replace every 'ActionTrigger's txid so each peer's range is unique +-- across peers. Guarantees zero cross-peer overlap regardless of the raw +-- arbitrary txids. +renumberDisjoint :: [[ActionTrigger]] -> [[ActionTrigger]] +renumberDisjoint = snd . mapAccumL renumberOne 1 + where + renumberOne nextId triggers = + let n = length triggers in + (nextId + n, zipWith setTxid triggers [nextId .. nextId + n - 1]) + +-- | Replace every 'ActionTrigger's txid with a draw from a shared pool +-- of size 'poolSize'. Within-peer collisions get deduped by the +-- subsequent 'normaliseTriggers'; cross-peer collisions become the +-- overlap that the multi-peer test is designed to exercise. 
+collapseToPool :: Int -> [[ActionTrigger]] -> Gen [[ActionTrigger]] +collapseToPool poolSize = traverse (traverse remap) + where + remap trig = do + newId <- chooseInt (1, poolSize) + pure (setTxid trig newId) + +setTxid :: ActionTrigger -> TxId -> ActionTrigger +setTxid (TSubmittable _ s) t = TSubmittable t s +setTxid (TFetchable _ s) t = TFetchable t s +setTxid (TAckable _) t = TAckable t +setTxid (TFetchableLater d _ s) t = TFetchableLater d t s + +instance Arbitrary TriggerScenario where + arbitrary = do + -- Peer-count distribution: weighted toward 1-2 peers so the simpler + -- single-peer cases remain well-represented while 3-peer scenarios + -- still arise often enough to surface multi-claim contention. + nPeers <- frequency + [ (2, pure 1) + , (2, pure 2) + , (1, pure 3) + ] + perPeer <- vectorOf nPeers genPerPeerTriggers + -- Mode-frequency 2:3 ensures both modes are well-represented; + -- 'ModeShared' is weighted slightly higher because cross-peer + -- overlap is the harder-to-reach coverage class. + mode <- frequency + [ (2, pure ModeDisjoint) + , (3, pure ModeShared) + ] + remapped <- case mode of + ModeDisjoint -> pure (renumberDisjoint perPeer) + ModeShared -> + let totalT = sum (map length perPeer) + poolSz = max 1 (totalT `div` 2) in + collapseToPool poolSz perPeer + pure (TriggerScenario mode + (Map.fromList (zip [1 .. nPeers] remapped))) + -- Shrink keeps the mode and shrinks structure: drop a peer first, + -- then shrink one peer's list. Keeping at least one peer preserves + -- well-formedness; mode is locked at generation so shrinks never + -- migrate between disjoint and shared. + shrink (TriggerScenario mode m) = + [ TriggerScenario mode (Map.delete p m) + | Map.size m > 1 + , p <- Map.keys m + ] + ++ [ TriggerScenario mode (Map.insert p ts' m) + | (p, ts) <- Map.toList m + , ts' <- shrinkTriggerList ts + ] + +-- | Strongest trigger category seen across all peers for a given txid. 
+-- Drives both the merged 'TxEntry' shape and the global expected-action +-- assertion in 'prop_nextPeerAction_processesAllTriggers'. +data TxCategory = CatSubmit | CatFetch | CatAck + deriving (Eq, Show) + +categoryOf :: [ActionTrigger] -> TxCategory +categoryOf trigs + | any isTSubmittable trigs = CatSubmit + | any isTFetchableNow trigs || any isTFetchableLater trigs = CatFetch + | otherwise = CatAck + +hasActiveEntry :: ActionTrigger -> Bool +hasActiveEntry TAckable{} = False +hasActiveEntry _ = True + +-- | Build a consistent multi-peer state from a per-peer trigger map. +-- Each peer's list must be normalised; triggers are globally re-keyed so +-- a txid mentioned by multiple peers gets a single shared 'TxKey' and +-- their 'TxEntry' merges advertiser count, attempts, and lease. +-- +-- For a txid: +-- * if any peer has 'TSubmittable', the lowest-numbered such peer holds +-- the lease ('TxLeased') and every TSubmittable peer's attempt is +-- 'TxBuffered'; +-- * else if any peer has 'TFetchableLater', the lease is 'TxClaimable' +-- delayed by the first such trigger's delay; +-- * else if any peer has 'TFetchable', the lease is 'TxClaimable' now; +-- * else (only 'TAckable' across all peers) the txid lives in +-- 'sharedRetainedTxs' and has no active-table entry. +buildTriggerState :: TxDecisionPolicy + -> Map.Map PeerAddr [ActionTrigger] + -> ( Map.Map PeerAddr (PeerTxLocalState (Tx TxId)) + , SharedTxState PeerAddr TxId + ) +buildTriggerState policy perPeer = + (peerStates, sharedState0) + where + -- Global txid order: peer-ascending, then per-peer trigger order; + -- first appearance wins. Stable so the same scenario always builds + -- the same state. + allTxids :: [TxId] + allTxids = nub + [ triggerTxid t + | (_, ts) <- Map.toAscList perPeer + , t <- ts + ] + + txidToKey :: Map.Map TxId Int + txidToKey = Map.fromList (zip allTxids [0..]) + + txidKey :: TxId -> Int + txidKey t = txidToKey Map.! 
t + + triggersFor :: TxId -> [(PeerAddr, ActionTrigger)] + triggersFor txid = + [ (p, t) + | (p, ts) <- Map.toAscList perPeer + , t <- ts + , triggerTxid t == txid + ] + + submittingPeer :: TxId -> Maybe PeerAddr + submittingPeer txid = listToMaybe + [ p | (p, t) <- triggersFor txid, isTSubmittable t ] + + laterDelay :: TxId -> Maybe Int + laterDelay txid = listToMaybe + [ d | (_, TFetchableLater (Positive d) _ _) <- triggersFor txid ] + + mkEntry :: TxId -> TxEntry PeerAddr + mkEntry txid = + let trigs = triggersFor txid + activeCount = length (filter (hasActiveEntry . snd) trigs) + attempts = Map.fromList + [ (p, TxBuffered) | (p, t) <- trigs, isTSubmittable t ] + lease = case submittingPeer txid of + Just p -> TxLeased p (addTime 10 now) + Nothing -> case laterDelay txid of + Just d -> TxClaimable (addTime (fromIntegral d) now) + Nothing -> TxClaimable now in + TxEntry + { txLease = lease + , txAdvertiserCount = activeCount + , txAttempts = attempts + , currentMaxInflightMultiplicity = txInflightMultiplicity policy + } + + activeTxids = [ txid | txid <- allTxids + , categoryOf (map snd (triggersFor txid)) /= CatAck ] + retainedTxids = [ txid | txid <- allTxids + , categoryOf (map snd (triggersFor txid)) == CatAck ] + + retainedUntil = addTime 600 now + retainedEntries = [ (txidKey txid, retainedUntil) | txid <- retainedTxids ] + + mkPeerLocal :: PeerAddr -> [ActionTrigger] -> PeerTxLocalState (Tx TxId) + mkPeerLocal _ ts = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.fromList + [ TxKey (txidKey (triggerTxid t)) | t <- ts ] + , peerDownloadedTxs = IntMap.fromList + [ (txidKey t', mkTx t' (mkSize s)) + | TSubmittable t' s <- ts + ] + , peerAvailableTxIds = IntMap.fromList $ + [ (txidKey t', mkSize s) | TFetchable t' s <- ts ] + ++ [ (txidKey t', mkSize s) | TFetchableLater _ t' s <- ts ] + } + + peerStates = Map.mapWithKey mkPeerLocal perPeer + + mkSharedPeer :: PeerAddr -> [ActionTrigger] -> SharedPeerState + mkSharedPeer _ ts = + let advKeys = [ 
TxKey (txidKey (triggerTxid t)) + | t <- ts, hasActiveEntry t ] in + withAdvertisedTxKeys advKeys (mkSharedPeerState PeerIdle) + + sharedPeers' = Map.mapWithKey mkSharedPeer perPeer + + sharedState0 = emptySharedTxState + { sharedPeers = sharedPeers' + , sharedTxTable = IntMap.fromList + [ (txidKey txid, mkEntry txid) | txid <- activeTxids ] + , sharedTxIdToKey = Map.fromList + [ (getRawTxId txid, TxKey (txidKey txid)) | txid <- allTxids ] + , sharedKeyToTxId = IntMap.fromList + [ (txidKey txid, txid) | txid <- allTxids ] + , sharedNextTxKey = length allTxids + , sharedRetainedTxs = retainedFromList retainedEntries + } + +-- | Normalise a per-peer trigger list: shift txids to >= 1, dedupe by +-- the post-shift txid, and reorder so ackables come first, then +-- submittables, then fetchables. The order matters because +-- 'pickSubmitAction' walks 'peerUnacknowledgedTxIds' in order and stops +-- at the first non-submittable-but-known tx, and 'pickRequestTxIdsAction' +-- takes the longest ackable prefix; with ackables first every ackable is +-- reached, and the submit walk skips ackables (no shared-table entry) to +-- pick up submittables before stopping at the first fetchable. +normaliseTriggers :: [ActionTrigger] -> [ActionTrigger] +normaliseTriggers = + orderTriggers + . nubBy ((==) `on` triggerTxid) + . map shiftTrigger + where + shiftTrigger (TSubmittable t s) = TSubmittable (abs t + 1) s + shiftTrigger (TFetchable t s) = TFetchable (abs t + 1) s + shiftTrigger (TAckable t) = TAckable (abs t + 1) + shiftTrigger (TFetchableLater d t s) = TFetchableLater d (abs t + 1) s + orderTriggers ts = + filter isTAckable ts + ++ filter isTSubmittable ts + ++ filter isTFetchableNow ts + ++ filter isTFetchableLater ts + +-- | Across peers, ensure each txid has 'TSubmittable' from at most one +-- peer. The lowest-numbered peer keeps the 'TSubmittable'; others get +-- demoted to 'TFetchable' with the same size. 
This avoids invariant- +-- violating initial states where two peers both hold the body for the +-- same tx with one as the lease-holder. +dedupeAcrossPeers :: Map.Map PeerAddr [ActionTrigger] + -> Map.Map PeerAddr [ActionTrigger] +dedupeAcrossPeers m = Map.mapWithKey (map . demote) m + where + primarySubmitter :: Map.Map TxId PeerAddr + primarySubmitter = Map.fromListWith min + [ (t, p) | (p, ts) <- Map.toList m, TSubmittable t _ <- ts ] + demote p (TSubmittable t s) + | Map.lookup t primarySubmitter /= Just p = TFetchable t s + demote _ trig = trig + +-- | Per-peer normalise plus cross-peer dedupe, in that order. +normaliseScenario :: Map.Map PeerAddr [ActionTrigger] + -> Map.Map PeerAddr [ActionTrigger] +normaliseScenario = dedupeAcrossPeers . Map.map normaliseTriggers + +-- | Drives 'nextPeerAction' for every peer in the scenario, advancing +-- the earliest-wake peer at each step. After any action that mutates +-- shared state (Submit / RequestTxs / RequestTxIds), other peers are +-- reactivated so they can re-evaluate against the new shared state. +-- Asserts: +-- +-- 1. The loop terminates within the iteration budget. +-- 2. Every txid whose strongest cross-peer trigger category is +-- 'CatSubmit' appears in the union of submitted keys. +-- 3. Every txid whose category is 'CatFetch' appears in the union of +-- requested keys. +-- 4. Every txid whose category is 'CatAck' appears in the union of +-- acked keys. +-- 5. 'combinedStateInvariant' holds on the initial state for every +-- peer and after the acting peer's update at every step. +prop_nextPeerAction_processesAllTriggers + :: ArbTxDecisionPolicy + -> TriggerScenario + -> Property +prop_nextPeerAction_processesAllTriggers + (ArbTxDecisionPolicy arbPolicy) (TriggerScenario mode rawPerPeer) = + tabulate "trigger count" [bucket totalTriggers] + . tabulate "peer count" [show nPeers] + . tabulate "iterations" [bucket iterations] + . tabulate "shared txids" [bucket sharedTxidCount] + . 
tabulate "overlap mode" [show mode] + $ conjoin + [ counterexample "loop must terminate within step budget" + $ property terminated + , counterexample "submitted set must equal CatSubmit txids" + $ allSubmitted === expectedSubmitted + , counterexample "requested set must equal CatFetch txids" + $ allRequested === expectedFetched + , counterexample + ("expected acks missing: " ++ show (IntSet.toList missingAcks)) + $ property (IntSet.null missingAcks) + , conjoin + [ counterexample ("initial invariant for peer " ++ show p) + $ combinedStateInvariant policy StrongInvariant p ps0 sharedState0 + | (p, ps0) <- Map.toList peerStates0 + ] + , conjoin + [ counterexample + ("invariant after step " ++ show n + ++ " (peer " ++ show p ++ ")") inv + | (n, p, inv) <- stateInvariants + ] + , conjoin + [ checkQuiescence p ps + | (p, (Nothing, ps)) <- Map.toList finalSchedule + ] + ] + where + perPeer = normaliseScenario rawPerPeer + totalTriggers = sum (map length (Map.elems perPeer)) + nPeers = Map.size perPeer + + -- Number of txids that appear in two or more peers' trigger lists. + -- A non-zero value confirms the multi-peer apparatus is actually + -- exercising cross-peer overlap rather than running independent + -- single-peer scenarios in parallel. + txidPeerCounts :: Map.Map TxId Int + txidPeerCounts = Map.fromListWith (+) + [ (triggerTxid t, 1 :: Int) + | (_, ts) <- Map.toList perPeer + , t <- ts + ] + sharedTxidCount = length + [ () | (_, n) <- Map.toList txidPeerCounts, n >= 2 ] + + policy = arbPolicy + { txInflightMultiplicity = 2 + , txsSizeInflightPerPeer = + max_TX_SIZE * fromIntegral (max 1 totalTriggers) + , maxOutstandingTxBatchesPerPeer = max 1 totalTriggers + } + + -- Per-peer interleaving plus lease-expiry cycles inflate iteration + -- counts versus the single-peer case; the budget grows linearly with + -- the product so each peer can claim every advertised tx without + -- exhausting the budget. 
+ maxIters = 100 + 6 * totalTriggers * max 1 nPeers + + -- Global per-txid expected categories. + allTxids = nub + [ triggerTxid t | (_, ts) <- Map.toAscList perPeer, t <- ts ] + txidToKey = Map.fromList (zip allTxids [0..]) + txidKey t = txidToKey Map.! t + + triggersFor txid = + [ trig | (_, ts) <- Map.toAscList perPeer + , trig <- ts + , triggerTxid trig == txid ] + catFor txid = categoryOf (triggersFor txid) + + -- With deferred body delivery, fetched txids run the full + -- Fetch -> Buffer -> Submit cycle, so 'CatFetch' txids contribute + -- to both expectedSubmitted and expectedFetched. + expectedSubmitted = IntSet.fromList + [ txidKey t | t <- allTxids + , let c = catFor t, c == CatSubmit || c == CatFetch ] + expectedFetched = IntSet.fromList + [ txidKey t | t <- allTxids, catFor t == CatFetch ] + expectedAcked = IntSet.fromList + [ txidKey t | t <- allTxids, catFor t == CatAck ] + + (peerStates0, sharedState0) = buildTriggerState policy perPeer + + initialSchedule :: Map.Map PeerAddr (Maybe Time, PeerTxLocalState (Tx TxId)) + initialSchedule = Map.map (\ps -> (Just now, ps)) peerStates0 + + (allSubmitted, allRequested, allAcked, + stateInvariants, terminated, iterations, + finalSchedule, finalSS, finalTime) = + runLoop sharedState0 initialSchedule Map.empty + IntSet.empty IntSet.empty IntSet.empty + [] 0 now + + missingAcks = expectedAcked `IntSet.difference` allAcked + + -- Pick the active peer (status 'Just t') with the smallest 't'. + -- Returns 'Nothing' if every peer is parked at 'Nothing' (terminated). + pickEarliest schedule = + case sortBy (compare `on` snd) + [ (p, t) | (p, (Just t, _)) <- Map.toList schedule ] of + [] -> Nothing + (p, t) : _ -> + let (_, ps) = schedule Map.! p in + Just (p, t, ps) + + -- After a state-mutating action, drag every other peer's wake to + -- 'time' (or earlier) so they re-evaluate the new shared state. A + -- peer parked at 'Nothing' (previously terminated) is reactivated. 
+ reactivateOthers acting time = + Map.mapWithKey $ \p (status, ps) -> + if p == acting + then (status, ps) + else case status of + Just t' | t' <= time -> (Just t', ps) + _ -> (Just time, ps) + + -- Build the (txid, body) pair for a requested key by looking up the + -- txid in shared state and the size in the requesting peer's + -- pre-action 'peerAvailableTxIds'. Deferring delivery means we have + -- to capture sizes before 'applyRequestTxsChoice' moves the keys + -- out of 'peerAvailableTxIds'. + mkBody :: SharedTxState PeerAddr TxId + -> PeerTxLocalState (Tx TxId) + -> TxKey + -> (TxId, Tx TxId) + mkBody ss ps (TxKey k) = + let txid = sharedKeyToTxId ss IntMap.! k + size = peerAvailableTxIds ps IntMap.! k in + (txid, mkTx txid size) + + -- Quiescence: re-poll a peer parked at terminal status against the + -- final shared state. A truly terminated peer should produce + -- 'PeerDoNothing _ Nothing' with no state mutation. Catches + -- non-determinism in 'nextPeerAction', accidental input mutation + -- on the no-op path, and "fake termination" regressions. + checkQuiescence :: PeerAddr -> PeerTxLocalState (Tx TxId) -> Property + checkQuiescence p ps = + let (action, ps', ss') = nextPeerAction finalTime policy p ps finalSS in + case action of + PeerDoNothing _ Nothing -> + conjoin + [ counterexample ("quiescence: peer " ++ show p ++ " local state changed") + $ ps' === ps + , counterexample ("quiescence: peer " ++ show p ++ " mutated shared state") + $ ss' === finalSS + ] + other -> + counterexample + ("quiescence: peer " ++ show p ++ " produced " ++ show other) + (property False) + + -- runLoop's 'pending' field maps each peer to the bodies queued for + -- its next-iteration delivery. Drain happens just before the peer + -- acts; the request-to-delivery gap gives other peers exactly one + -- scheduling step to observe the in-flight state. 
+ runLoop ss schedule pending subs reqs acks invs i lastTime + | i >= maxIters = + (subs, reqs, acks, reverse invs, False, i, schedule, ss, lastTime) + | otherwise = + case pickEarliest schedule of + Nothing -> + (subs, reqs, acks, reverse invs, True, i, schedule, ss, lastTime) + Just (p, time, ps) -> + let lastTime' = max lastTime time + + -- Drain p's pending body deliveries before its action. + (psPre, ssPre, pendingPre, drainInvs, stepDrain) = + case Map.lookup p pending of + Nothing -> (ps, ss, pending, [], i) + Just deliveries -> + let (_, _, ps2, ss2) = + handleReceivedTxs (const False) time policy p + deliveries ps ss + drainInv = combinedStateInvariant policy + StrongInvariant p ps2 ss2 + stepD = i + 1 in + ( ps2, ss2, Map.delete p pending + , [(stepD, p, drainInv)], stepD ) + + (action, ps', ss') = + nextPeerAction time policy p psPre ssPre + oldUnacked = peerUnacknowledgedTxIds psPre + newUnacked = peerUnacknowledgedTxIds ps' + numAcked = StrictSeq.length oldUnacked + - StrictSeq.length newUnacked + ackedNow = IntSet.fromList $ map unTxKey + $ toList (StrictSeq.take numAcked oldUnacked) + inv = combinedStateInvariant policy + StrongInvariant p ps' ss' + step = stepDrain + 1 in + case action of + PeerDoNothing _ Nothing -> + let schedule' = Map.insert p (Nothing, ps') schedule in + runLoop ss' schedule' pendingPre subs reqs acks + ((step, p, inv) : drainInvs ++ invs) step lastTime' + PeerDoNothing _ (Just delay) -> + let nextWake = addTime (max delay 0.001) time + schedule' = Map.insert p (Just nextWake, ps') schedule in + runLoop ss' schedule' pendingPre subs reqs acks + ((step, p, inv) : drainInvs ++ invs) step lastTime' + PeerSubmitTxs ks -> + let (ps'', ss'') = handleSubmittedTxs time policy p ks [] ps' ss' + postInv = combinedStateInvariant policy + StrongInvariant p ps'' ss'' + others' = reactivateOthers p time schedule + schedule' = Map.insert p (Just time, ps'') others' + subs' = IntSet.union subs + (IntSet.fromList (unTxKey <$> ks)) in + runLoop 
ss'' schedule' pendingPre subs' reqs acks + ( (step, p, postInv) : (step, p, inv) + : drainInvs ++ invs ) step lastTime' + PeerRequestTxs ks -> + -- Queue the bodies for delivery on p's next iteration. + -- Bodies are built from the pre-action peer state so + -- the size lookup hits 'peerAvailableTxIds' before + -- 'applyRequestTxsChoice' moved the keys out. + let bodies = [ mkBody ssPre psPre k | k <- ks ] + pending' = Map.insert p bodies pendingPre + others' = reactivateOthers p time schedule + schedule' = Map.insert p (Just time, ps') others' + reqs' = IntSet.union reqs + (IntSet.fromList (unTxKey <$> ks)) in + runLoop ss' schedule' pending' subs reqs' acks + ((step, p, inv) : drainInvs ++ invs) step lastTime' + PeerRequestTxIds{} -> + let others' = reactivateOthers p time schedule + schedule' = Map.insert p (Just time, ps') others' + acks' = IntSet.union acks ackedNow in + runLoop ss' schedule' pendingPre subs reqs acks' + ((step, p, inv) : drainInvs ++ invs) step lastTime' + +-- | Policy used by the 'TriggerScenario' meta-tests below. Mirrors the +-- pin in 'prop_nextPeerAction_processesAllTriggers'. +metaPolicy :: TxDecisionPolicy +metaPolicy = defaultTxDecisionPolicy { txInflightMultiplicity = 2 } + +-- | Every generated 'TriggerScenario' produces an initial state that +-- satisfies 'combinedStateInvariant' for every peer. Catches state- +-- builder bugs (wrong txids in maps, missing entries, etc.) before they +-- show up as confusing failures of +-- 'prop_nextPeerAction_processesAllTriggers'. 
+prop_TriggerScenario_validInitialState :: TriggerScenario -> Property +prop_TriggerScenario_validInitialState (TriggerScenario _ rawPerPeer) = + let perPeer = normaliseScenario rawPerPeer + (states, ss0) = buildTriggerState metaPolicy perPeer in + conjoin + [ counterexample ("invalid initial state for peer " ++ show p) + $ combinedStateInvariant metaPolicy StrongInvariant p ps ss0 + | (p, ps) <- Map.toList states + ] + +-- | Every shrink of a generated scenario is itself valid. Without this, +-- 'prop_nextPeerAction_processesAllTriggers' could shrink into invalid +-- territory and report a bogus counterexample. +prop_TriggerScenario_shrinkPreservesValidity :: TriggerScenario -> Property +prop_TriggerScenario_shrinkPreservesValidity ts = + conjoin + [ prop_TriggerScenario_validInitialState ts' + | ts' <- shrink ts + ] + +-- | Shrinks never grow the total trigger count. Catches shrinker +-- regressions that would slow down or invalidate the main property's +-- shrinking. +prop_TriggerScenario_shrinkSmaller :: TriggerScenario -> Property +prop_TriggerScenario_shrinkSmaller ts@(TriggerScenario _ rawM) = + let n = sum (map length (Map.elems rawM)) in + conjoin + [ counterexample ("shrink grew the trigger count: " ++ show ts') + $ sum (map length (Map.elems rawM')) <= n + | ts'@(TriggerScenario _ rawM') <- shrink ts + ] + +-- | A scenario is not in its own shrink list. Catches shrink-into-self +-- regressions that would loop QuickCheck. +prop_TriggerScenario_shrinkExcludesOriginal :: TriggerScenario -> Property +prop_TriggerScenario_shrinkExcludesOriginal ts = + counterexample "shrink contains the original value" + $ property (ts `notElem` shrink ts) + + -- Verifies that nextPeerAction leases a claimable tx to the best idle -- advertiser and requests its body. 
prop_nextPeerAction_claimsClaimableTx From 41e1bd16a7209343196fdc601d0dba48462fda6d Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 1 May 2026 10:08:52 +0200 Subject: [PATCH 51/67] fixup: fix checkNoThunks checkNoThunks was always true, unsafeNoThunks wasn't really called. seq forces val to WHNF, but deeper thunks will be detected. --- .../lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 5fedb7bcf99..e67f502a2c1 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -114,8 +114,11 @@ tests = -- | Check that a value has no thunks in its fields. checkNoThunks :: NoThunks a => String -> a -> Property checkNoThunks name val = - let result = unsafeNoThunks val - in counterexample (name ++ ": " ++ show result) $ property True + val `seq` case unsafeNoThunks val of + Nothing -> property True + Just info -> counterexample + (name ++ " contains thunks: " ++ show info) + (property False) -- -- InboundState properties From 0a8f09a6c6f24b395f66cf7a9a3bc313cc029957 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 1 May 2026 10:19:17 +0200 Subject: [PATCH 52/67] fixup: remove prop_nextPeerAction_prioritisesSubmit With no-thunk checks in prop_nextPeerAction_processesAllTriggers the prop_nextPeerAction_prioritisesSubmit isn't needed. 
--- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 95 ++++++++----------- 1 file changed, 38 insertions(+), 57 deletions(-) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index e67f502a2c1..a063bdc3798 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -77,7 +77,6 @@ tests = , testCaseSteps "nextPeerAction lets another peer claim a fresh tx when the first advertiser is full" unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull , testProperty "handleReceivedTxs handles mixed buffered / omitted / late-retained / late-mempool / pruned txids" prop_handleReceivedTxs , testProperty "handleSubmittedTxs retains accepted and drops rejected" prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected - , testProperty "nextPeerAction prioritises submitting buffered owned txs" prop_nextPeerAction_prioritisesSubmit , testProperty "nextPeerAction processes all multi-peer triggers" prop_nextPeerAction_processesAllTriggers , testProperty "nextPeerAction claims claimable tx for best idle advertiser" prop_nextPeerAction_claimsClaimableTx , testProperty "nextPeerAction claims a released tx from another advertiser" prop_nextPeerAction_claimsRejectedTxFromOtherAdvertiser @@ -1532,54 +1531,6 @@ prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected , retainedLookup k (sharedRetainedTxs sharedState') === Nothing ] --- Verifies that nextPeerAction submits buffered txs owned by the peer before --- taking any other action. 
-prop_nextPeerAction_prioritisesSubmit - :: ArbTxDecisionPolicy - -> Positive Int - -> TxId - -> Positive Int - -> Property -prop_nextPeerAction_prioritisesSubmit (ArbTxDecisionPolicy policy) (Positive peeraddr) txid0 txSize0 = - case peerAction of - PeerSubmitTxs [txKey] -> - conjoin - [ txKey === key - , peerState' === peerState0 - -- Submit selection atomically marks the chosen tx as TxSubmitting - -- so concurrent peer decisions exclude it. - , sharedState' === markSubmittingTxs peeraddr [key] sharedState0 - , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "sharedState'" sharedState' - ] - _ -> counterexample ("unexpected peer action: " ++ show peerAction) False - where - txid = abs txid0 + 1 - txSize = mkSize txSize0 - tx = mkTx txid txSize - key = TxKey 0 - k = unTxKey key - sharedState0 = emptySharedTxState - { sharedPeers = - Map.singleton peeraddr - (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxLeased peeraddr (addTime 10 now) - , txAdvertiserCount = 1 - , txAttempts = Map.singleton peeraddr TxBuffered - , currentMaxInflightMultiplicity = txInflightMultiplicity policy - } - , sharedTxIdToKey = Map.singleton (getRawTxId txid) key - , sharedKeyToTxId = IntMap.singleton k txid - , sharedNextTxKey = 1 - } - peerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.singleton key - , peerRequestedTxIds = maxNumTxIdsToRequest policy - , peerDownloadedTxs = IntMap.singleton k tx - } - (peerAction, peerState', sharedState') = nextPeerAction now policy peeraddr peerState0 sharedState0 - -- | Trigger kinds for the model-based 'nextPeerAction' property. Each -- describes one action that should be choosable at the moment the test -- calls 'nextPeerAction'. 
The state-builder turns the list into a @@ -2133,9 +2084,19 @@ prop_nextPeerAction_processesAllTriggers let (_, _, ps2, ss2) = handleReceivedTxs (const False) time policy p deliveries ps ss - drainInv = combinedStateInvariant policy - StrongInvariant p ps2 ss2 - stepD = i + 1 in + stepD = i + 1 + drainInv = conjoin + [ combinedStateInvariant policy + StrongInvariant p ps2 ss2 + , checkNoThunks + ("peerState after drain (peer " + ++ show p ++ ", step " ++ show stepD ++ ")") + ps2 + , checkNoThunks + ("sharedState after drain (peer " + ++ show p ++ ", step " ++ show stepD ++ ")") + ss2 + ] in ( ps2, ss2, Map.delete p pending , [(stepD, p, drainInv)], stepD ) @@ -2147,9 +2108,19 @@ prop_nextPeerAction_processesAllTriggers - StrictSeq.length newUnacked ackedNow = IntSet.fromList $ map unTxKey $ toList (StrictSeq.take numAcked oldUnacked) - inv = combinedStateInvariant policy - StrongInvariant p ps' ss' - step = stepDrain + 1 in + step = stepDrain + 1 + inv = conjoin + [ combinedStateInvariant policy + StrongInvariant p ps' ss' + , checkNoThunks + ("peerState' (peer " ++ show p + ++ ", step " ++ show step ++ ")") + ps' + , checkNoThunks + ("sharedState' (peer " ++ show p + ++ ", step " ++ show step ++ ")") + ss' + ] in case action of PeerDoNothing _ Nothing -> let schedule' = Map.insert p (Nothing, ps') schedule in @@ -2162,8 +2133,18 @@ prop_nextPeerAction_processesAllTriggers ((step, p, inv) : drainInvs ++ invs) step lastTime' PeerSubmitTxs ks -> let (ps'', ss'') = handleSubmittedTxs time policy p ks [] ps' ss' - postInv = combinedStateInvariant policy - StrongInvariant p ps'' ss'' + postInv = conjoin + [ combinedStateInvariant policy + StrongInvariant p ps'' ss'' + , checkNoThunks + ("peerState'' (peer " ++ show p + ++ ", step " ++ show step ++ ")") + ps'' + , checkNoThunks + ("sharedState'' (peer " ++ show p + ++ ", step " ++ show step ++ ")") + ss'' + ] others' = reactivateOthers p time schedule schedule' = Map.insert p (Just time, ps'') others' subs' = 
IntSet.union subs From bc5bcbaf6a20bf7010a40317cbdbb57b29251f83 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 1 May 2026 11:18:29 +0200 Subject: [PATCH 53/67] fixup: improve prop_nextPeerAction_claimsClaimableTx --- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 226 +++++++++++++++--- 1 file changed, 191 insertions(+), 35 deletions(-) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index a063bdc3798..fc8453e901b 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -27,12 +27,12 @@ module Test.Ouroboros.Network.TxSubmission.TxLogic import Control.DeepSeq (NFData (rnf)) import Control.Exception (evaluate) -import Control.Monad.Class.MonadTime.SI (Time (..), addTime, diffTime) +import Control.Monad.Class.MonadTime.SI (DiffTime, Time (..), addTime, diffTime) import Data.Foldable (foldl', toList) import Data.Function (on) import Data.IntMap.Strict qualified as IntMap import Data.IntSet qualified as IntSet -import Data.List (mapAccumL, nub, nubBy, sortBy) +import Data.List (elemIndex, mapAccumL, nub, nubBy, sortBy) import Data.Map.Strict qualified as Map import Data.Maybe (isJust, listToMaybe) import Data.Sequence.Strict qualified as StrictSeq @@ -2222,8 +2222,27 @@ prop_TriggerScenario_shrinkExcludesOriginal ts = $ property (ts `notElem` shrink ts) --- Verifies that nextPeerAction leases a claimable tx to the best idle --- advertiser and requests its body. +-- | Roles for 'prop_nextPeerAction_claimsClaimableTx': 'Good' has no +-- score and can claim; 'Bad' has a 'peerClaimDelay' that pushes its +-- claim past 'now'; 'Confounder' has no relevant local state and is +-- expected to do nothing on any 'nextPeerAction' call. 
+data PeerRole = Good | Bad | Confounder + deriving (Eq, Show) + +-- | A scheduling order over the three roles. The 'Arbitrary' instance +-- shuffles the three roles uniformly so each of the six permutations +-- is reached. +newtype PeerOrder = PeerOrder [PeerRole] + deriving (Eq, Show) + +instance Arbitrary PeerOrder where + arbitrary = PeerOrder <$> shuffle [Good, Bad, Confounder] + +-- | Verifies that 'Good' (no score) wins the lease regardless of which +-- order the three peers are scheduled. 'Bad' is score-delayed past +-- 'now' so its 'nextPeerAction' yields without claiming; 'Confounder' +-- has no advertised tx and yields trivially. The lease must end up at +-- 'Good' across all six role orderings. prop_nextPeerAction_claimsClaimableTx :: ArbTxDecisionPolicy -> Positive Int @@ -2231,53 +2250,190 @@ prop_nextPeerAction_claimsClaimableTx -> Positive Int -> TxId -> Positive Int + -> Positive Int + -> Positive Int + -> PeerOrder -> Property -prop_nextPeerAction_claimsClaimableTx (ArbTxDecisionPolicy policy) (Positive peerA0) (Positive peerB0) (Positive peerC0) txid0 txSize0 = - distinctPeers ==> - peerTxLocalStateInvariant policy peerState0 .&&. - case peerAction of - PeerRequestTxs txKeys -> - conjoin - [ txKeys === [key] - , peerRequestedTxs peerState' === IntSet.singleton k - , txLease (lookupEntryOrFail key sharedState') === - TxLeased peerA (addTime (interTxSpace policy) now) - , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "sharedState'" sharedState' - ] - _ -> counterexample ("unexpected peer action: " ++ show peerAction) False +prop_nextPeerAction_claimsClaimableTx + (ArbTxDecisionPolicy arbPolicy) + (Positive good0) (Positive bad0) (Positive conf0) + txid0 txSize0 (Positive badScore0) (Positive tDecay0) (PeerOrder order) = + tabulate "order" [show order] + . tabulate "bad score (decayed)" + [bucket (round decayedBadScore :: Int)] + . 
tabulate "tDecay (s)" [bucket (round tDecaySec :: Int)] + $ conjoin + [ peerTxLocalStateInvariant policy goodPeerState0 + , peerTxLocalStateInvariant policy badPeerState0 + , peerTxLocalStateInvariant policy confPeerState0 + , counterexample + ("Good must claim: " ++ show (lookupResult Good)) $ + case lookupResult Good of + Just (PeerRequestTxs txKeys, _) -> txKeys === [key] + _ -> property False + , counterexample "Good must record the requested tx" $ + case lookupResult Good of + Just (_, ps') -> peerRequestedTxs ps' === IntSet.singleton k + Nothing -> property False + , counterexample + ("Bad must yield with the score-delay derived wake: " + ++ show (lookupResult Bad)) $ + case lookupResult Bad of + Just (PeerDoNothing _ (Just delay), _) -> + -- Tolerance absorbs sub-picosecond FP drift from + -- production's 'Double' score arithmetic. 1ns is well + -- below any production-relevant timing distinction. + let diff = abs (delay - expectedBadDelay) in + counterexample + ("delay = " ++ show delay + ++ ", expected = " ++ show expectedBadDelay + ++ ", diff = " ++ show diff) + (property (diff < 1e-9)) + other -> + counterexample ("got: " ++ show other) (property False) + , counterexample + ("Confounder must do nothing with no scheduled wake: " + ++ show (lookupResult Confounder)) $ + case lookupResult Confounder of + Just (PeerDoNothing _ Nothing, _) -> property True + _ -> property False + , counterexample "Lease must end up at Good" $ + txLease (lookupEntryOrFail key sharedStateFinal) === + TxLeased goodPeer (addTime (interTxSpace policy) now) + , checkNoThunks "sharedStateFinal" sharedStateFinal + , conjoin + [ checkNoThunks + ("peerState' for " ++ show role) + (ps' :: PeerTxLocalState (Tx TxId)) + | (role, _, ps', _) <- results + ] + , conjoin + [ counterexample + ("combined invariant for " ++ show role) + (combinedStateInvariant policy StrongInvariant + (fst (roleSetup role)) ps' ss') + | (role, _, ps', ss') <- results + ] + ] where - peerA = peerA0 - peerAScore = 
PeerScore (min 1 (scoreMax policy)) now - peerB = peerB0 + 1000 - peerC = peerC0 + 2000 - distinctPeers = peerA /= peerB && peerA /= peerC && peerB /= peerC - txid = abs txid0 + 1 + -- Pin 'scoreMax' high enough to fit a pre-decay score of + -- (decayed-target + decayAmount) without clamping; bound + -- 'scoreRate' to a non-zero range so the decay arithmetic in + -- 'currentPeerScore' produces an observable change. + policy = arbPolicy + { scoreMax = max 200 (scoreMax arbPolicy) + , scoreRate = max 0.01 (min 1.0 (scoreRate arbPolicy)) + } + + goodPeer = good0 + badPeer = bad0 + 1000 + confPeer = conf0 + 2000 + txid = abs txid0 + 1 txSize = mkSize txSize0 - key = TxKey 0 - k = unTxKey key + key = TxKey 0 + k = unTxKey key + + -- Score decay parameters: 'peerScoreTs' is 'tDecaySec' seconds + -- before 'now', so 'currentPeerScore' takes its decay arm. The + -- pre-decay 'peerScoreValue' is chosen so the decayed value lands + -- in [21..100] -- below the pinned 'scoreMax = 200' and above the + -- 1ms-delay threshold of 'now - claimableAt = 0.001s'. + claimableAt = Time 99.999 + tDecaySec :: Double + tDecaySec = fromIntegral (1 + (tDecay0 - 1) `mod` 10 :: Int) + decayAmount :: Double + decayAmount = tDecaySec * scoreRate policy + -- Decayed score in [21..100] regardless of 'tDecaySec'/'scoreRate'. + decayedBadScore :: Double + decayedBadScore = fromIntegral (21 + (badScore0 - 1) `mod` 80 :: Int) + badInitialScore :: Double + badInitialScore = decayedBadScore + decayAmount + badPeerScore = PeerScore + { peerScoreValue = badInitialScore + , peerScoreTs = addTime (negate (realToFrac tDecaySec)) now + } + -- Bad's wake delay depends on whether Good claimed before Bad ran: + -- * Bad runs while the lease is 'TxClaimable claimableAt': wake at + -- 'claimableAt + peerClaimDelay', delay relative to 'now'. + -- * Bad runs after Good claimed (lease is 'TxLeased Good + -- (now + interTxSpace)'): wake at 'leaseUntil + peerClaimDelay', + -- delay = 'interTxSpace + peerClaimDelay'. 
+ -- The score-delay formula '/ 20000' is replicated independently + -- here (rather than imported) so divergence in the production + -- 'peerClaimDelay' surfaces as a delay mismatch. + badRunsBeforeGood = case (elemIndex Bad order, elemIndex Good order) of + (Just bi, Just gi) -> bi < gi + _ -> False + -- Independent score-delay formula using the *decayed* score: this + -- replicates production's 'peerClaimDelay (currentPeerScore _ _)' + -- in two steps so divergences in either decay direction (S11) or + -- the '/ 20000' divisor (S14) surface as a delay mismatch. + badClaimDelay :: DiffTime + badClaimDelay = realToFrac (decayedBadScore / 20000) + expectedBadDelay :: DiffTime + expectedBadDelay + | badRunsBeforeGood = badClaimDelay - diffTime now claimableAt + | otherwise = badClaimDelay + interTxSpace policy + sharedState0 = emptySharedTxState { sharedPeers = Map.fromList - [ (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , (peerC, withAdvertisedTxKeys [key] (mkSharedPeerState PeerWaitingTxs)) + [ (goodPeer, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + , (badPeer, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + , (confPeer, mkSharedPeerState PeerIdle) ] , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxClaimable (Time 0) - , txAdvertiserCount = 3 + { txLease = TxClaimable claimableAt + , txAdvertiserCount = 2 , txAttempts = Map.empty , currentMaxInflightMultiplicity = txInflightMultiplicity policy } } - peerState0 = emptyPeerTxLocalState + + goodPeerState0 = emptyPeerTxLocalState { peerUnacknowledgedTxIds = StrictSeq.singleton key - , peerAvailableTxIds = IntMap.singleton k txSize - , peerScore = peerAScore + , peerAvailableTxIds = IntMap.singleton k txSize } - (peerAction, peerState', sharedState') = nextPeerAction now 
policy peerA peerState0 sharedState0 + badPeerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton key + , peerAvailableTxIds = IntMap.singleton k txSize + , peerScore = badPeerScore + } + -- 'peerRequestedTxIds' pinned at the per-peer cap so Confounder has + -- no headroom for a 'PeerRequestTxIds' action; combined with empty + -- 'peerAvailableTxIds'/'peerUnacknowledgedTxIds'/'peerDownloadedTxs' + -- and no advertised keys, Confounder has genuinely nothing to do + -- and must yield 'PeerDoNothing _ Nothing'. + confPeerState0 = emptyPeerTxLocalState + { peerRequestedTxIds = maxNumTxIdsToRequest policy + } + + roleSetup Good = (goodPeer, goodPeerState0) + roleSetup Bad = (badPeer, badPeerState0) + roleSetup Confounder = (confPeer, confPeerState0) + + -- Run nextPeerAction for each peer in the generated order, + -- threading shared state. Records the post-action peer state and + -- shared state per role so each peer's joint + -- '(ps', ss')' can be checked by 'combinedStateInvariant'. + runOne (ss, acc) role = + let (peer, ps0) = roleSetup role + (action, ps', ss') = nextPeerAction now policy peer ps0 ss + in (ss', (role, action, ps', ss') : acc) + + (sharedStateFinal, resultsRev) = foldl' runOne (sharedState0, []) order + results :: [( PeerRole + , PeerAction + , PeerTxLocalState (Tx TxId) + , SharedTxState PeerAddr TxId )] + results = reverse resultsRev + + lookupResult :: PeerRole + -> Maybe (PeerAction, PeerTxLocalState (Tx TxId)) + lookupResult role = listToMaybe + [ (action, ps') | (r, action, ps', _) <- results, r == role ] -- | A peer's score decays linearly at 'scoreRate' from its last -- timestamped value, clamped to zero. From 54f7779303b3261196913444544ca92b69ddb666 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 1 May 2026 11:31:26 +0200 Subject: [PATCH 54/67] fixup: convert trivial property to unit test Convert trivial nextPeerAction_claimsRejectedTxFromOtherAdvertiser to a unit tests. 
--- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 93 ++++++++++++++----- 1 file changed, 72 insertions(+), 21 deletions(-) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index fc8453e901b..66f1170cded 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -79,7 +79,7 @@ tests = , testProperty "handleSubmittedTxs retains accepted and drops rejected" prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected , testProperty "nextPeerAction processes all multi-peer triggers" prop_nextPeerAction_processesAllTriggers , testProperty "nextPeerAction claims claimable tx for best idle advertiser" prop_nextPeerAction_claimsClaimableTx - , testProperty "nextPeerAction claims a released tx from another advertiser" prop_nextPeerAction_claimsRejectedTxFromOtherAdvertiser + , testCaseSteps "nextPeerAction claims a released tx from another advertiser" unit_nextPeerAction_claimsRejectedTxFromOtherAdvertiser , testCaseSteps "nextPeerAction claims a tx once the score delay threshold has elapsed" unit_nextPeerAction_claimsAtScoreDelayThreshold , testCaseSteps "peerScore decays linearly over time at scoreRate" unit_peerScore_decaysOverTime , testCaseSteps "applyPeerRejections drains the existing score before adding the new rejection count" unit_applyPeerRejections_drainsThenAdds @@ -119,6 +119,26 @@ checkNoThunks name val = (name ++ " contains thunks: " ++ show info) (property False) +-- | HUnit equivalent of 'checkNoThunks'. +assertNoThunks :: NoThunks a => String -> a -> Assertion +assertNoThunks name val = + val `seq` case unsafeNoThunks val of + Nothing -> pure () + Just info -> assertFailure (name ++ " contains thunks: " ++ show info) + +-- | Evaluate a 'Property' once (no QuickCheck shrinking) and convert +-- the verdict into an 'Assertion'. 
Useful for invariant helpers like +-- 'combinedStateInvariant' that return 'Property' so they're directly +-- usable from 'testProperty', but need bridging to 'testCaseSteps'. +assertProperty :: Testable prop => String -> prop -> Assertion +assertProperty name prop = do + result <- quickCheckWithResult + stdArgs { chatty = False, maxSuccess = 1 } + prop + case result of + Success {} -> pure () + _ -> assertFailure (name ++ ": " ++ output result) + -- -- InboundState properties -- @@ -2513,15 +2533,57 @@ unit_nextPeerAction_claimsAtScoreDelayThreshold step = do -- 'nextPeerAction' path: once no peer has an outstanding attempt on a -- tx and its lease is claimable, a still-advertising peer is eligible -- to re-claim. -prop_nextPeerAction_claimsRejectedTxFromOtherAdvertiser :: Property -prop_nextPeerAction_claimsRejectedTxFromOtherAdvertiser = - counterexample "peerB should be able to claim the released tx" - $ case action of - PeerRequestTxs keys -> - counterexample ("keys requested: " ++ show keys) - $ TxKey txKeyInt `elem` keys - _ -> - counterexample ("peerB action: " ++ show action) False +unit_nextPeerAction_claimsRejectedTxFromOtherAdvertiser + :: (String -> IO ()) -> Assertion +unit_nextPeerAction_claimsRejectedTxFromOtherAdvertiser step = do + step "Run handleSubmittedTxs with peerA rejecting the tx" + let (peerAStateAfter, sharedStateAfterRejection) = + handleSubmittedTxs now defaultTxDecisionPolicy peerA + [] + [TxKey txKeyInt] + peerAState + sharedState0 + entryAfterRejection = + lookupEntryOrFail (TxKey txKeyInt) sharedStateAfterRejection + + step "Assert peerA's downloadedTxs is cleared" + peerDownloadedTxs peerAStateAfter @?= IntMap.empty + + step "Assert the lease is released and peerA leaves the advertiser set" + txLease entryAfterRejection @?= TxClaimable now + txAttempts entryAfterRejection @?= Map.empty + txAdvertiserCount entryAfterRejection @?= 1 + + step "Assert combinedStateInvariant for peerA after rejection" + assertProperty 
"combinedStateInvariant peerA after rejection" $ + combinedStateInvariant defaultTxDecisionPolicy StrongInvariant + peerA peerAStateAfter sharedStateAfterRejection + + step "Assert NoThunks on post-rejection states" + assertNoThunks "peerAStateAfter" peerAStateAfter + assertNoThunks "sharedStateAfterRejection" sharedStateAfterRejection + + step "Run nextPeerAction for peerB" + let (action, peerBStateAfter, sharedStateFinal) = + nextPeerAction now defaultTxDecisionPolicy peerB peerBState + sharedStateAfterRejection + + step "Assert peerB claims the released tx exclusively" + action @?= PeerRequestTxs [TxKey txKeyInt] + peerRequestedTxs peerBStateAfter @?= IntSet.singleton txKeyInt + + step "Assert peerB now holds the lease" + txLease (lookupEntryOrFail (TxKey txKeyInt) sharedStateFinal) @?= + TxLeased peerB (addTime (interTxSpace defaultTxDecisionPolicy) now) + + step "Assert combinedStateInvariant for peerB after claim" + assertProperty "combinedStateInvariant peerB after claim" $ + combinedStateInvariant defaultTxDecisionPolicy StrongInvariant + peerB peerBStateAfter sharedStateFinal + + step "Assert NoThunks on post-claim states" + assertNoThunks "peerBStateAfter" peerBStateAfter + assertNoThunks "sharedStateFinal" sharedStateFinal where peerA, peerB :: PeerAddr peerA = 1 @@ -2568,17 +2630,6 @@ prop_nextPeerAction_claimsRejectedTxFromOtherAdvertiser = , peerAvailableTxIds = IntMap.singleton txKeyInt txSize } - (_peerAStateAfter, sharedStateAfterRejection) = - handleSubmittedTxs now defaultTxDecisionPolicy peerA - [] - [TxKey txKeyInt] - peerAState - sharedState0 - - (action, _peerBStateAfter, _sharedStateFinal) = - nextPeerAction now defaultTxDecisionPolicy peerB peerBState - sharedStateAfterRejection - -- Verifies that nextPeerAction can steal an expired lease for the best idle -- advertiser and request that tx. 
prop_nextPeerAction_claimsExpiredLease From 73a68a730da9e832fb1e1eb2558fbe5f7a9c29c5 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 1 May 2026 11:45:33 +0200 Subject: [PATCH 55/67] fixup: remove prop_nextPeerAction_claimsExpiredLease Fold prop_nextPeerAction_claimsExpiredLease into prop_nextPeerAction_claimsClaimableTx which is expanded to cover both case which is expanded to cover both casess --- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 108 +++++++----------- 1 file changed, 43 insertions(+), 65 deletions(-) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 66f1170cded..7a76db56f79 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -83,7 +83,6 @@ tests = , testCaseSteps "nextPeerAction claims a tx once the score delay threshold has elapsed" unit_nextPeerAction_claimsAtScoreDelayThreshold , testCaseSteps "peerScore decays linearly over time at scoreRate" unit_peerScore_decaysOverTime , testCaseSteps "applyPeerRejections drains the existing score before adding the new rejection count" unit_applyPeerRejections_drainsThenAdds - , testProperty "nextPeerAction steals expired lease for best idle advertiser" prop_nextPeerAction_claimsExpiredLease , testProperty "nextPeerAction requests an oversized first tx within the soft budget" prop_nextPeerAction_requestsOversizedFirstTx , testCaseSteps "nextPeerAction skips blocked available txs and requests later claimable ones" unit_nextPeerAction_skipsBlockedAvailableTxs , testProperty "nextPeerAction submits buffered owned txs before acking" prop_nextPeerAction_ownerSubmitsBuffered @@ -2249,6 +2248,19 @@ prop_TriggerScenario_shrinkExcludesOriginal ts = data PeerRole = Good | Bad | Confounder deriving (Eq, Show) +-- | Initial-lease form for 
'prop_nextPeerAction_claimsClaimableTx': +-- 'ClaimableLease' starts with 'TxClaimable claimableAt' (no current +-- holder); 'ExpiredLease' starts with 'TxLeased oldOwner claimableAt' +-- (held by a 'PeerWaitingTxs'-phase peer whose 'leaseUntil' is in the +-- past). Production routes both through 'txClaimReadyAt', so the same +-- claim arithmetic applies in either form -- this axis subsumes the +-- expired-lease scenario into the same property. +data LeaseStart = ClaimableLease | ExpiredLease + deriving (Eq, Show) + +instance Arbitrary LeaseStart where + arbitrary = elements [ClaimableLease, ExpiredLease] + -- | A scheduling order over the three roles. The 'Arbitrary' instance -- shuffles the three roles uniformly so each of the six permutations -- is reached. @@ -2273,15 +2285,18 @@ prop_nextPeerAction_claimsClaimableTx -> Positive Int -> Positive Int -> PeerOrder + -> LeaseStart -> Property prop_nextPeerAction_claimsClaimableTx (ArbTxDecisionPolicy arbPolicy) (Positive good0) (Positive bad0) (Positive conf0) - txid0 txSize0 (Positive badScore0) (Positive tDecay0) (PeerOrder order) = + txid0 txSize0 (Positive badScore0) (Positive tDecay0) (PeerOrder order) + leaseStart = tabulate "order" [show order] . tabulate "bad score (decayed)" [bucket (round decayedBadScore :: Int)] . tabulate "tDecay (s)" [bucket (round tDecaySec :: Int)] + . tabulate "lease start" [show leaseStart] $ conjoin [ peerTxLocalStateInvariant policy goodPeerState0 , peerTxLocalStateInvariant policy badPeerState0 @@ -2395,18 +2410,38 @@ prop_nextPeerAction_claimsClaimableTx | badRunsBeforeGood = badClaimDelay - diffTime now claimableAt | otherwise = badClaimDelay + interTxSpace policy + -- 'oldOwner' is only present in shared state when 'leaseStart = + -- ExpiredLease'. It holds a stale 'TxLeased oldOwner claimableAt' + -- with 'leaseUntil = claimableAt < now', so the lease is expired + -- and any other advertiser whose 'peerClaimDelay' permits can + -- claim. 
'nextPeerAction' is never called for 'oldOwner' -- it + -- exists only to bump 'txAdvertiserCount' and supply the + -- expired-lease holder. + oldOwner = good0 + bad0 + conf0 + 3000 + oldOwnerEntries = case leaseStart of + ClaimableLease -> [] + ExpiredLease -> + [ ( oldOwner + , withAdvertisedTxKeys [key] (mkSharedPeerState PeerWaitingTxs) + ) + ] sharedState0 = emptySharedTxState { sharedPeers = Map.fromList - [ (goodPeer, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , (badPeer, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , (confPeer, mkSharedPeerState PeerIdle) - ] + $ [ (goodPeer, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + , (badPeer, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + , (confPeer, mkSharedPeerState PeerIdle) + ] + ++ oldOwnerEntries , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxClaimable claimableAt - , txAdvertiserCount = 2 + { txLease = case leaseStart of + ClaimableLease -> TxClaimable claimableAt + ExpiredLease -> TxLeased oldOwner claimableAt + , txAdvertiserCount = case leaseStart of + ClaimableLease -> 2 + ExpiredLease -> 3 , txAttempts = Map.empty , currentMaxInflightMultiplicity = txInflightMultiplicity policy } @@ -2630,63 +2665,6 @@ unit_nextPeerAction_claimsRejectedTxFromOtherAdvertiser step = do , peerAvailableTxIds = IntMap.singleton txKeyInt txSize } --- Verifies that nextPeerAction can steal an expired lease for the best idle --- advertiser and request that tx. -prop_nextPeerAction_claimsExpiredLease - :: ArbTxDecisionPolicy - -> Positive Int - -> Positive Int - -> Positive Int - -> TxId - -> Positive Int - -> Property -prop_nextPeerAction_claimsExpiredLease (ArbTxDecisionPolicy policy) (Positive oldOwner0) (Positive peerA0) (Positive peerB0) txid0 txSize0 = - distinctPeers ==> - peerTxLocalStateInvariant policy peerState0 .&&. 
- case peerAction of - PeerRequestTxs txKeys -> - conjoin - [ txKeys === [key] - , peerRequestedTxs peerState' === IntSet.singleton k - , txLease (lookupEntryOrFail key sharedState') === - TxLeased peerA (addTime (interTxSpace policy) now) - , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "sharedState'" sharedState' - ] - _ -> counterexample ("unexpected peer action: " ++ show peerAction) False - where - oldOwner = oldOwner0 - peerA = peerA0 + 1000 - peerAScore = PeerScore (min 1 (scoreMax policy)) now - peerB = peerB0 + 2000 - distinctPeers = oldOwner /= peerA && oldOwner /= peerB && peerA /= peerB - txid = abs txid0 + 1 - txSize = mkSize txSize0 - key = TxKey 0 - k = unTxKey key - sharedState0 = emptySharedTxState - { sharedPeers = Map.fromList - [ (oldOwner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerWaitingTxs)) - , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - ] - , sharedTxIdToKey = Map.singleton (getRawTxId txid) key - , sharedKeyToTxId = IntMap.singleton k txid - , sharedNextTxKey = 1 - , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxLeased oldOwner (Time 0) - , txAdvertiserCount = 3 - , txAttempts = Map.empty - , currentMaxInflightMultiplicity = txInflightMultiplicity policy - } - } - peerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.singleton key - , peerAvailableTxIds = IntMap.singleton k txSize - , peerScore = peerAScore - } - (peerAction, peerState', sharedState') = nextPeerAction now policy peerA peerState0 sharedState0 - -- Verifies that nextPeerAction still requests an oversized first tx when it -- is the only available choice within the soft-budget policy. 
prop_nextPeerAction_requestsOversizedFirstTx From edb0f618b70fb6c04201eb548295c6afeb63f17a Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 1 May 2026 13:39:51 +0200 Subject: [PATCH 56/67] fixup: improved prop_nextPeerAction_picksTxsRespectingBudget --- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 264 +++++++++++++++--- 1 file changed, 230 insertions(+), 34 deletions(-) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 7a76db56f79..ca3ecb604c9 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -83,7 +83,7 @@ tests = , testCaseSteps "nextPeerAction claims a tx once the score delay threshold has elapsed" unit_nextPeerAction_claimsAtScoreDelayThreshold , testCaseSteps "peerScore decays linearly over time at scoreRate" unit_peerScore_decaysOverTime , testCaseSteps "applyPeerRejections drains the existing score before adding the new rejection count" unit_applyPeerRejections_drainsThenAdds - , testProperty "nextPeerAction requests an oversized first tx within the soft budget" prop_nextPeerAction_requestsOversizedFirstTx + , testProperty "nextPeerAction picks txs respecting the inflight size budget" prop_nextPeerAction_picksTxsRespectingBudget , testCaseSteps "nextPeerAction skips blocked available txs and requests later claimable ones" unit_nextPeerAction_skipsBlockedAvailableTxs , testProperty "nextPeerAction submits buffered owned txs before acking" prop_nextPeerAction_ownerSubmitsBuffered , testCaseSteps "nextPeerAction requests other txs despite a blocked buffered tx" unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx @@ -2665,51 +2665,247 @@ unit_nextPeerAction_claimsRejectedTxFromOtherAdvertiser step = do , peerAvailableTxIds = IntMap.singleton txKeyInt txSize } --- Verifies that nextPeerAction still 
requests an oversized first tx when it --- is the only available choice within the soft-budget policy. -prop_nextPeerAction_requestsOversizedFirstTx +-- | Verifies that 'pickRequestTxsAction' obeys the soft-budget batch +-- semantics across a list of available txs: +-- +-- * The first tx in advertisement order is requested even if its +-- size exceeds the per-peer inflight budget (provided the peer +-- hasn't already filled the inflight cap, which it hasn't here). +-- * Each subsequent tx is included while +-- 'selectedSize + txSize <= sizeBudget'. +-- * The walk stops at the first tx that does not fit; later (smaller) +-- txs are not tried. +-- +-- Subsumes the single-tx soft-budget claim (one element in the list) +-- and pins the multi-tx '<= sizeBudget' boundary that catches the +-- 'exceedsBudget' branch logic. +prop_nextPeerAction_picksTxsRespectingBudget :: ArbTxDecisionPolicy -> Positive Int - -> TxId + -> NonEmptyList (Positive Int) + -> Positive Int -> Positive Int -> Property -prop_nextPeerAction_requestsOversizedFirstTx (ArbTxDecisionPolicy basePolicy) (Positive peeraddr) txid0 (Positive txSize0) = - peerTxLocalStateInvariant policy peerState0 .&&. - case peerAction of - PeerRequestTxs [txKey] -> - conjoin - [ txKey === key - , peerRequestedTxs peerState' === IntSet.singleton k - , peerRequestedTxsSize peerState' === txSize - , txLease (lookupEntryOrFail key sharedState') === - TxLeased peeraddr (addTime (interTxSpace policy) now) - , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "sharedState'" sharedState' - ] - _ -> counterexample ("unexpected peer action: " ++ show peerAction) False +prop_nextPeerAction_picksTxsRespectingBudget + (ArbTxDecisionPolicy basePolicy) (Positive peeraddr) + (NonEmpty rawSizes) (Positive budget0) (Positive prefilledCount0) = + tabulate "tx count" [bucket n] + . tabulate "budget mode" [budgetMode] + . tabulate "prefilled count" [bucket prefilledCount] + . 
tabulate "selected count" [bucket (length expectedSelection)] + $ conjoin + [ peerTxLocalStateInvariant policy peerState0 + , -- 'expectedSelection' empty implies the budget is exhausted + -- (cap consumed or no candidates), so the action must be + -- 'PeerDoNothing'; otherwise it must match exactly. + case (expectedSelection, action) of + ([], PeerDoNothing _ _) -> property True + ([], other) -> + counterexample ("expected PeerDoNothing, got: " ++ show other) + (property False) + (_, PeerRequestTxs txKeys) -> txKeys === expectedKeys + (_, other) -> + counterexample + ("expected PeerRequestTxs " ++ show expectedKeys + ++ ", got: " ++ show other) + (property False) + , counterexample "peerRequestedTxsSize tracks total in-flight" $ + peerRequestedTxsSize peerState' === prefilledSize + sumExpected + , conjoin + [ counterexample ("lease for selected key " ++ show k) $ + txLease (lookupEntryOrFail (TxKey k) sharedState') === + TxLeased peeraddr (addTime (interTxSpace policy) now) + | (k, _) <- expectedSelection + ] + , conjoin + [ counterexample ("prefilled lease unchanged for key " ++ show k) $ + txLease (lookupEntryOrFail (TxKey k) sharedState') === + TxLeased peeraddr (addTime 10 now) + | (k, _) <- prefilledTxs + ] + , conjoin + [ counterexample + ("unselected candidate lease unchanged for key " ++ show k) $ + txLease (lookupEntryOrFail (TxKey k) sharedState') === + TxClaimable now + | (k, _) <- candidateTxs + , TxKey k `notElem` expectedKeys + ] + , conjoin + [ counterexample ("txAttempts for selected key " ++ show k) $ + txAttempts (lookupEntryOrFail (TxKey k) sharedState') === + Map.singleton peeraddr TxDownloading + | (k, _) <- expectedSelection + ] + , conjoin + [ counterexample ("txAttempts unchanged for prefilled key " ++ show k) $ + txAttempts (lookupEntryOrFail (TxKey k) sharedState') === + Map.singleton peeraddr TxDownloading + | (k, _) <- prefilledTxs + ] + , conjoin + [ counterexample ("txAttempts unchanged for unselected key " ++ show k) $ + txAttempts 
(lookupEntryOrFail (TxKey k) sharedState') === + Map.empty + | (k, _) <- candidateTxs + , TxKey k `notElem` expectedKeys + ] + , combinedStateInvariant policy StrongInvariant peeraddr + peerState' sharedState' + , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) + , checkNoThunks "sharedState'" sharedState' + ] where - txid = abs txid0 + 1 - txSize = mkSize (Positive (txSize0 + 1)) - key = TxKey 0 - k = unTxKey key + txSizes :: [SizeInBytes] + txSizes = map mkSize rawSizes + n = length txSizes + + -- All txs in advertisement order, indexed by 'TxKey'. + indexed :: [(Int, SizeInBytes)] + indexed = zip [0..] txSizes + + keys :: [TxKey] + keys = [TxKey k | (k, _) <- indexed] + + txidFor :: Int -> TxId + txidFor i = i + 1 + + -- Number of pre-existing in-flight txs taken from the start of + -- 'indexed'. Range '[0..n]' covers fully-fresh, partial-prefill, + -- and fully-prefilled (no candidates) configurations. + prefilledCount :: Int + prefilledCount = (prefilledCount0 - 1) `mod` (n + 1) + + (prefilledTxs, candidateTxs) = splitAt prefilledCount indexed + + prefilledSize :: SizeInBytes + prefilledSize = sum (map snd prefilledTxs) + + candidateTotal :: Int + candidateTotal = sum (map (fromIntegral . getSizeInBytes . snd) candidateTxs) + + -- Budget split into three regions to keep the multi-tx fitting + -- cases well-represented in the corpus rather than getting drowned + -- by cap-consumed runs: + -- + -- * 25% "low" ('[0..prefilledSize-1]' or just 0 when + -- prefilledSize is 0) -- exercises 'cap consumed'. + -- * 50% "mid" ('[prefilledSize..prefilledSize+candidateTotal]') + -- -- the partial-fitting boundary where S4-shape budget + -- mutations live. + -- * 25% "high" ('[prefilledSize+candidateTotal+1..]') -- "all + -- fits" with budget to spare. + -- + -- 'budget0' is split: bottom two bits select the region, upper + -- bits position within it. 
+ budgetVal :: Int + budgetVal = + let region = (budget0 - 1) `mod` 4 + offset = (budget0 - 1) `div` 4 + pSize = fromIntegral prefilledSize + in case region of + 0 -> offset `mod` max 1 pSize + 3 -> pSize + candidateTotal + 1 + + offset `mod` max 1 candidateTotal + _ -> pSize + offset `mod` (candidateTotal + 1) + budget :: SizeInBytes + budget = fromIntegral budgetVal + + -- Remaining budget after accounting for in-flight prefilled bytes. + sizeBudget :: Int + sizeBudget = max 0 (budgetVal - fromIntegral prefilledSize) + + -- Independent expected selection mirroring production's + -- 'exceedsBudget' with non-zero starting 'peerRequestedTxsSize'. + -- Soft-budget allowance for the first candidate fires only when + -- 'prefilledSize < budget' (the cap isn't already consumed). + -- Accumulators carried as 'Int' to avoid 'SizeInBytes' overflow in + -- the wider arithmetic. + expectedSelection :: [(Int, SizeInBytes)] + expectedSelection = go [] (0 :: Int) candidateTxs + where + go acc _ [] = reverse acc + go acc tot ((k, s) : rest) + | exceedsBudget tot (fromIntegral s) = reverse acc + | otherwise = go ((k, s) : acc) (tot + fromIntegral s) rest + + exceedsBudget :: Int -> Int -> Bool + exceedsBudget selectedSize txSize + | selectedSize + txSize <= sizeBudget = False + | selectedSize > 0 = True + | otherwise = fromIntegral prefilledSize >= budgetVal + + expectedKeys :: [TxKey] + expectedKeys = [TxKey k | (k, _) <- expectedSelection] + + sumExpected :: SizeInBytes + sumExpected = sum (map snd expectedSelection) + + budgetMode + | null candidateTxs = "no candidates" + | budgetVal >= fromIntegral prefilledSize + candidateTotal = "all fits" + | fromIntegral prefilledSize >= budgetVal = "cap consumed" + | maybe False (\(_, s) -> sizeBudget < fromIntegral s) + (listToMaybeFst candidateTxs) = "only first (soft)" + | otherwise = "partial" + where + listToMaybeFst :: [a] -> Maybe a + listToMaybeFst [] = Nothing + listToMaybeFst (x : _) = Just x + + -- 
'maxOutstandingTxBatchesPerPeer' pinned to at least 2 so the + -- prefilled batch (when present) plus a fresh request batch fit. policy = basePolicy - { txsSizeInflightPerPeer = txSize - 1 - , maxOutstandingTxBatchesPerPeer = 1 + { txsSizeInflightPerPeer = budget + , maxOutstandingTxBatchesPerPeer = + max 2 (maxOutstandingTxBatchesPerPeer basePolicy) } + sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr - (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , sharedTxIdToKey = Map.singleton (getRawTxId txid) key - , sharedKeyToTxId = IntMap.singleton k txid - , sharedNextTxKey = 1 - , sharedTxTable = IntMap.singleton k (mkTxEntry peeraddr txSize Nothing policy) + (withAdvertisedTxKeys keys (mkSharedPeerState PeerIdle)) + , sharedTxIdToKey = Map.fromList + [ (getRawTxId (txidFor i), TxKey i) | (i, _) <- indexed ] + , sharedKeyToTxId = IntMap.fromList + [ (i, txidFor i) | (i, _) <- indexed ] + , sharedNextTxKey = n + , sharedTxTable = IntMap.fromList + [ (i, mkEntry i) | (i, _) <- indexed ] } + where + mkEntry i + | i < prefilledCount = TxEntry + { txLease = TxLeased peeraddr (addTime 10 now) + , txAdvertiserCount = 1 + , txAttempts = Map.singleton peeraddr TxDownloading + , currentMaxInflightMultiplicity = txInflightMultiplicity policy + } + | otherwise = TxEntry + { txLease = TxClaimable now + , txAdvertiserCount = 1 + , txAttempts = Map.empty + , currentMaxInflightMultiplicity = txInflightMultiplicity policy + } + peerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.singleton key - , peerAvailableTxIds = IntMap.singleton k txSize - , peerRequestedTxIds = maxNumTxIdsToRequest policy + { peerUnacknowledgedTxIds = StrictSeq.fromList keys + , peerAvailableTxIds = IntMap.fromList indexed + , peerRequestedTxs = IntSet.fromList + [ k | (k, _) <- prefilledTxs ] + , peerRequestedTxBatches = case prefilledTxs of + [] -> StrictSeq.empty + xs -> StrictSeq.singleton + (mkRequestedTxBatch + [TxKey k | (k, _) <- xs] + 
prefilledSize) + , peerRequestedTxsSize = prefilledSize + -- 'peerRequestedTxIds' pinned at the cap so the action under test + -- is 'PeerRequestTxs' (or 'PeerDoNothing'), not 'PeerRequestTxIds'. + , peerRequestedTxIds = maxNumTxIdsToRequest policy } - (peerAction, peerState', sharedState') = nextPeerAction now policy peeraddr peerState0 sharedState0 + + (action, peerState', sharedState') = + nextPeerAction now policy peeraddr peerState0 sharedState0 -- Verifies that nextPeerAction skips available txs blocked by another -- peer's lease and requests a later claimable tx instead. From 275a95a45d4e7e2d445eaee7e064376f374d60e6 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 1 May 2026 14:34:14 +0200 Subject: [PATCH 57/67] fixup: multi peer benchmark Convert the direct server benchmark into one that can run multiple peers with async. --- .../bench/Bench/TxSubmissionV2Server.hs | 87 ++++++++++++------- ouroboros-network/bench/Main.hs | 15 ++++ ouroboros-network/ouroboros-network.cabal | 1 + 3 files changed, 70 insertions(+), 33 deletions(-) diff --git a/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs b/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs index ce4c42180c6..c1ea4134e7c 100644 --- a/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs +++ b/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs @@ -10,12 +10,14 @@ module Bench.TxSubmissionV2Server ( DirectServerFixture , DirectServerResult , mkDirectServerFixture + , mkMultiPeerFixture , runDirectServerBenchmark ) where import Control.Concurrent.Class.MonadSTM qualified as Lazy import Control.Concurrent.Class.MonadSTM.Strict import Control.DeepSeq (NFData) +import Control.Monad.Class.MonadAsync (mapConcurrently_) import Control.Tracer (nullTracer) import Data.List.NonEmpty qualified as NonEmpty @@ -40,7 +42,7 @@ import Test.Ouroboros.Network.TxSubmission.Types data DirectServerFixture = DirectServerFixture - { dsPeerAddr :: !Int + { dsPeerCount :: !Int , dsTxIdReplyBatches :: !Int , dsTxSize 
:: !SizeInBytes } @@ -65,9 +67,28 @@ mkDirectServerFixture :: Int -> DirectServerFixture mkDirectServerFixture batches = DirectServerFixture - { dsPeerAddr = 1 + { dsPeerCount = 1 , dsTxIdReplyBatches = batches - , dsTxSize = SizeInBytes 1024 + , dsTxSize = SizeInBytes 1024 + } + + +-- | Multi-peer fixture: @peers@ peers each playing back the same txid stream. +-- +-- Models the mainnet steady state where many upstream peers advertise the +-- same fresh txids. The first peer downloads each tx body and submits it to +-- the mempool, which puts the key into 'sharedRetainedTxs'. Subsequent +-- peers find the txid in the retained set and ack-skip without ever +-- requesting the body. +mkMultiPeerFixture + :: Int -- ^ peer count + -> Int -- ^ batches per peer + -> DirectServerFixture +mkMultiPeerFixture peers batches = + DirectServerFixture + { dsPeerCount = peers + , dsTxIdReplyBatches = batches + , dsTxSize = SizeInBytes 1024 } @@ -75,7 +96,7 @@ runDirectServerBenchmark :: DirectServerFixture -> IO DirectServerResult runDirectServerBenchmark DirectServerFixture { - dsPeerAddr, + dsPeerCount, dsTxIdReplyBatches, dsTxSize } = do @@ -84,35 +105,35 @@ runDirectServerBenchmark sharedStateVar <- newSharedTxStateVar emptySharedTxState countersVar <- newTxSubmissionCountersVar mempty - withPeer - defaultTxDecisionPolicy - (getMempoolReader inboundMempool) - sharedStateVar - countersVar - dsPeerAddr - $ \api -> do - let server = - txSubmissionInboundV2 - nullTracer - NoTxSubmissionInitDelay - defaultTxDecisionPolicy - (getMempoolWriter duplicateTxIdsVar inboundMempool) - getTxSize - api - - case server of - TxSubmissionServerPipelined initServer -> do - st0 <- initServer - driveServer - dsTxSize - dsTxIdReplyBatches - 1 - [] - st0 - - (DirectServerResult - . 
length <$> readMempool inboundMempool) - <*> readTVarIO countersVar + let runPeer addr = + withPeer + defaultTxDecisionPolicy + (getMempoolReader inboundMempool) + sharedStateVar + countersVar + addr + $ \api -> do + let server = + txSubmissionInboundV2 + nullTracer + NoTxSubmissionInitDelay + defaultTxDecisionPolicy + (getMempoolWriter duplicateTxIdsVar inboundMempool) + getTxSize + api + case server of + TxSubmissionServerPipelined initServer -> do + st0 <- initServer + driveServer dsTxSize dsTxIdReplyBatches 1 [] st0 + + -- Spawn one async per peer. Peers race for tx leases, contend on + -- the shared STM state, and exercise the scheduler the same way + -- they do on mainnet. + mapConcurrently_ runPeer [1 .. dsPeerCount] + + (DirectServerResult + . length <$> readMempool inboundMempool) + <*> readTVarIO countersVar driveServer diff --git a/ouroboros-network/bench/Main.hs b/ouroboros-network/bench/Main.hs index b5535d1242d..2f697ae894e 100644 --- a/ouroboros-network/bench/Main.hs +++ b/ouroboros-network/bench/Main.hs @@ -43,6 +43,21 @@ main = $ \fixture -> bench "server/direct-interpreter/single-peer/1000batches" $ nfAppIO DirectV2.runDirectServerBenchmark fixture + , env + (prepareEnv (DirectV2.mkMultiPeerFixture 10 1_000)) + $ \fixture -> + bench "server/direct-interpreter/multi-peer/10peers/1000batches" $ + nfAppIO DirectV2.runDirectServerBenchmark fixture + , env + (prepareEnv (DirectV2.mkMultiPeerFixture 100 100)) + $ \fixture -> + bench "server/direct-interpreter/multi-peer/100peers/100batches" $ + nfAppIO DirectV2.runDirectServerBenchmark fixture + , env + (prepareEnv (DirectV2.mkMultiPeerFixture 100 1_000)) + $ \fixture -> + bench "server/direct-interpreter/multi-peer/100peers/1000batches" $ + nfAppIO DirectV2.runDirectServerBenchmark fixture ] ] ] diff --git a/ouroboros-network/ouroboros-network.cabal b/ouroboros-network/ouroboros-network.cabal index 4dd8c5d2246..af86d13a840 100644 --- a/ouroboros-network/ouroboros-network.cabal +++ 
b/ouroboros-network/ouroboros-network.cabal @@ -321,6 +321,7 @@ library psqueues >=0.2.3 && <0.3, random, strict-checked-vars ^>=0.2, + time, transformers, typed-protocols ^>=1.2, From e2c1f099f0650926d6d5146fa71a495016f3cc3b Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 1 May 2026 18:26:29 +0200 Subject: [PATCH 58/67] fixup: faster diffTimeToMilliseconds Avoid Double conversions, similar to muxs diffTimeToMicroseconds. --- .../Ouroboros/Network/TxSubmission/Inbound/V2.hs | 6 +++--- .../Network/TxSubmission/Inbound/V2/Types.hs | 13 +++++++++---- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs index 39ca5f13c0a..c6cbb85223f 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs @@ -132,7 +132,7 @@ txSubmissionInboundV2 Nothing -> pure peerState Just startTime -> do addCounters mempty { txPipelineWaitMs = - diffTimeToMillis (now `diffTime` startTime) } + diffTimeToMilliseconds (now `diffTime` startTime) } pure $ peerState { peerDownloadStartTime = Nothing } (peerAction, peerState'') <- runNextPeerAction now (State.drainPeerScore policy now peerState') case peerAction of @@ -170,7 +170,7 @@ txSubmissionInboundV2 rejectedCount = length rejectedForTrace delta = end `diffTime` start - addCounters mempty { txSubmissionWaitMs = diffTimeToMillis delta } + addCounters mempty { txSubmissionWaitMs = diffTimeToMilliseconds delta } peerState' <- applySubmittedTxs end resolvedTxKeys (fmap fst rejectedTxs) peerState let (score, peerState'') = State.applyPeerRejections policy end rejectedCount peerState' traceWith tracer $ @@ -264,7 +264,7 @@ txSubmissionInboundV2 (traceWith tracer TraceTxInboundTerminated) (\txids -> do now <- getMonotonicTime - addCounters mempty { txIdBlockingWaitMs = diffTimeToMillis (now `diffTime` 
sendTime) } + addCounters mempty { txIdBlockingWaitMs = diffTimeToMilliseconds (now `diffTime` sendTime) } let txids' = NonEmpty.toList txids unless (length txids' <= fromIntegral txIdsToReq) $ throwIO ProtocolErrorTxIdsNotRequested diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index ac6cc62e5e5..8fff64e9976 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -8,6 +8,7 @@ {-# LANGUAGE GADTs #-} {-# LANGUAGE GeneralisedNewtypeDeriving #-} {-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE NumericUnderscores #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE TypeApplications #-} @@ -69,7 +70,7 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.Types , emptyPeerScore , emptyPeerTxLocalState , emptySharedTxState - , diffTimeToMillis + , diffTimeToMilliseconds ) where import Control.DeepSeq (NFData) @@ -82,6 +83,7 @@ import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map import Data.Sequence.Strict (StrictSeq) import Data.Set qualified as Set +import Data.Time.Clock (diffTimeToPicoseconds) import Data.Typeable (Typeable, eqT, (:~:) (Refl)) import GHC.Generics (Generic) @@ -370,9 +372,12 @@ instance Monoid TxSubmissionCounters where txSubmissionWaitMs = 0 } --- | Convert a 'DiffTime' to whole milliseconds, rounding to nearest. -diffTimeToMillis :: DiffTime -> Word64 -diffTimeToMillis dt = round (realToFrac dt * 1000 :: Double) +-- | Convert a non-negative 'DiffTime' to whole milliseconds (truncated). +-- +-- Works directly on the underlying picosecond 'Integer' to avoid the +-- 'realToFrac' detour through 'Rational' and 'Double'. +diffTimeToMilliseconds :: DiffTime -> Word64 +diffTimeToMilliseconds = fromInteger . (`div` 1_000_000_000) . 
diffTimeToPicoseconds emptyPeerScore :: Time -> PeerScore emptyPeerScore scoreTs = PeerScore { From 73bdcf77cb1ce6b4555f38cb37a9b31eefc39b9c Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Fri, 1 May 2026 18:27:46 +0200 Subject: [PATCH 59/67] fixup: remove a bumpGeneration --- .../Network/TxSubmission/Inbound/V2/State.hs | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 2ace773c1bc..facb4c7e49e 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -208,8 +208,6 @@ applyRequestTxsChoice ctx txsToRequest txsToRequestSize txTable = , sharedState'' ) where - requestedKeys = IntSet.fromList (unTxKey <$> txsToRequest) - peerState'' = (pacPeerState ctx) { peerRequestedTxs = @@ -221,13 +219,14 @@ applyRequestTxsChoice ctx txsToRequest txsToRequestSize txTable = }, peerRequestedTxsSize = peerRequestedTxsSize (pacPeerState ctx) + txsToRequestSize } + -- Take the lease without waking other advertisers: claiming doesn't + -- give them a new option (they couldn't claim before this commit, and + -- they still can't), and they'll be bumped on submit / lease release. sharedState'' = - bumpPeerGenerations - (advertisingPeersForTxKeysExcept (pacPeerAddr ctx) requestedKeys (pacSharedState ctx)) - ((pacSharedState ctx) { - sharedTxTable = txTable, - sharedGeneration = sharedGeneration (pacSharedState ctx) + 1 - }) + (pacSharedState ctx) { + sharedTxTable = txTable, + sharedGeneration = sharedGeneration (pacSharedState ctx) + 1 + } -- | Construct a 'PeerRequestTxIds' action and update local and shared txid state. 
applyRequestTxIdsChoice From f69fae48366abbe2725d9e775c1ff5658b956fd6 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Sat, 2 May 2026 03:15:11 +0200 Subject: [PATCH 60/67] fixup: move peer phase to PeerTxLocalState --- .../Network/TxSubmission/Inbound/V2.hs | 14 +- .../TxSubmission/Inbound/V2/Registry.hs | 72 +------- .../Network/TxSubmission/Inbound/V2/State.hs | 27 ++- .../Network/TxSubmission/Inbound/V2/Types.hs | 15 +- .../Ouroboros/Network/OrphanInstances.hs | 10 +- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 162 +++++------------- .../Ouroboros/Network/Tracing/TxSubmission.hs | 8 +- 7 files changed, 100 insertions(+), 208 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs index c6cbb85223f..d5fd2ae6550 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2.hs @@ -137,8 +137,18 @@ txSubmissionInboundV2 (peerAction, peerState'') <- runNextPeerAction now (State.drainPeerScore policy now peerState') case peerAction of PeerDoNothing generation mDelay -> do - awaitSharedChange generation mDelay - continueWithStateM serverIdle peerState'' + -- An Active->Idle transition means this peer has just become + -- eligible for actions it could not take before (e.g. claiming + -- expired leases as an idle claimant). Re-run the scheduler + -- immediately rather than parking on a wake condition that may + -- not fire. 
+ let cameToIdle = peerPhase peerState' /= PeerIdle + && peerPhase peerState'' == PeerIdle + if cameToIdle + then continueWithStateM serverIdle peerState'' + else do + awaitSharedChange generation mDelay + continueWithStateM serverIdle peerState'' PeerSubmitTxs txKeys -> continueWithStateM (submitBufferedTxs txKeys serverIdle) peerState'' PeerRequestTxs txKeys -> diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index 65ae9904e59..807b3c828fb 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -10,8 +10,6 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.Registry , newTxSubmissionCountersVar , txCountersThreadV2 , withPeer - -- Exported for testing - , updatePeerPhase ) where import Control.Concurrent.Class.MonadSTM qualified as Lazy @@ -25,7 +23,6 @@ import Data.IntMap.Strict qualified as IntMap import Data.IntSet qualified as IntSet import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map -import Data.Set qualified as Set import Data.Void (Void) import Data.Word (Word64) @@ -196,7 +193,6 @@ withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar } where sharedPeerState = SharedPeerState { - sharedPeerPhase = PeerIdle, sharedPeerAdvertisedTxKeys = IntSet.empty, sharedPeerGeneration = 0 } @@ -344,9 +340,7 @@ runNextPeerActionImp policy sharedStateVar countersVar peeraddr now peerState = let sharedGeneration0 = sharedGeneration sharedState (peerAction, peerState', sharedState') = State.nextPeerAction now policy peeraddr peerState sharedState - sharedState'' = updatePeerPhase peeraddr - (peerPhaseForActionIdle peerAction) sharedState' - writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState'' + writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' 
updateCountersForAction countersVar peerAction return (peerAction, peerState') @@ -371,10 +365,7 @@ runNextPeerActionPipelinedImp policy sharedStateVar countersVar peeraddr now pee let sharedGeneration0 = sharedGeneration sharedState (peerAction, peerState', sharedState') = State.nextPeerActionPipelined now policy peeraddr peerState sharedState - sharedState'' = updatePeerPhase peeraddr - (peerPhaseForActionPipelined peeraddr peerAction sharedState') - sharedState' - writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState'' + writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' updateCountersForAction countersVar peerAction return (peerAction, peerState') @@ -517,62 +508,3 @@ resolveBufferedTxsImp sharedStateVar peerState txKeys = atomically $ do Nothing -> error "TxSubmission.V2.resolveBufferedTxsImp: missing buffered tx" ) --- | Update a peer's phase. --- --- A phase change always bumps the shared generation. In addition: --- --- * When a peer becomes 'PeerIdle', bump that peer's own generation so a --- 'PeerDoNothing' action computed before the phase change does not put that --- same peer thread to sleep on a stale generation. This makes its next --- 'awaitSharedChange' return immediately and re-run scheduling as an idle --- claimant. --- * When a peer leaves idle, bump idle advertisers so they can immediately --- compete for any leases the departing peer held. 
-updatePeerPhase - :: Ord peeraddr - => peeraddr - -> PeerPhase - -> SharedTxState peeraddr txid - -> SharedTxState peeraddr txid -updatePeerPhase peeraddr peerPhaseNew - st@SharedTxState { sharedPeers, sharedGeneration } = - case Map.lookup peeraddr sharedPeers of - Just sharedPeerState -> - let peerPhaseOld = sharedPeerPhase sharedPeerState in - if peerPhaseOld /= peerPhaseNew - then - let sharedPeerState' = sharedPeerState { sharedPeerPhase = peerPhaseNew } - st' = st { sharedPeers = Map.insert peeraddr sharedPeerState' sharedPeers - , sharedGeneration = sharedGeneration + 1 } - in bumpPeerGenerations (phaseWakePeers peerPhaseOld) st' - else st - _ -> st -- TODO error? - where - phaseWakePeers peerPhaseOld - | peerPhaseOld /= PeerIdle - , peerPhaseNew == PeerIdle = Set.singleton peeraddr - | otherwise = Set.empty - -peerPhaseForActionIdle :: PeerAction -> PeerPhase -peerPhaseForActionIdle peerAction = - case peerAction of - PeerDoNothing {} -> PeerIdle - PeerSubmitTxs {} -> PeerSubmittingToMempool - PeerRequestTxs {} -> PeerWaitingTxs - PeerRequestTxIds {} -> PeerWaitingTxIds - -peerPhaseForActionPipelined - :: Ord peeraddr - => peeraddr - -> PeerAction - -> SharedTxState peeraddr txid - -> PeerPhase -peerPhaseForActionPipelined peeraddr peerAction sharedState = - case peerAction of - PeerDoNothing {} -> peerPhaseOf peeraddr sharedState - PeerSubmitTxs {} -> PeerSubmittingToMempool - PeerRequestTxs {} -> PeerWaitingTxs - PeerRequestTxIds {} -> PeerWaitingTxIds - where - peerPhaseOf peer st = - maybe PeerIdle sharedPeerPhase (Map.lookup peer (sharedPeers st)) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index facb4c7e49e..81e7cf5ea0b 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -123,7 +123,8 @@ nextPeerActionPipelined = 
nextPeerActionWithMode AllowPipelinedTxIdRequests -- -- nextPeerActionWithMode handles body requests for txs this peer may currently -- fetch, tx submission for bodies buffered locally by this peer, and txid ack/request --- messages. +-- messages. Updates 'peerPhase' on the returned 'PeerTxLocalState' to +-- reflect the chosen action. nextPeerActionWithMode :: (Ord peeraddr, HasRawTxId txid) => TxIdRequestMode -> Time @@ -133,11 +134,30 @@ nextPeerActionWithMode :: (Ord peeraddr, HasRawTxId txid) -> SharedTxState peeraddr txid -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) nextPeerActionWithMode txIdRequestMode now policy peeraddr peerState sharedState = - applyPeerActionChoice ctx (pickPeerActionChoice txIdRequestMode ctx) + let (action, peerState', sharedState'') = + applyPeerActionChoice ctx (pickPeerActionChoice txIdRequestMode ctx) + peerState'' = peerState' { + peerPhase = phaseForAction txIdRequestMode (peerPhase peerState) action + } + in (action, peerState'', sharedState'') where sharedState' = bumpStuckEntries now policy peeraddr peerState sharedState ctx = mkPeerActionContext now policy peeraddr peerState sharedState' +-- | Compute the new 'PeerPhase' for the chosen 'PeerAction'. +-- +-- In pipelined mode a 'PeerDoNothing' keeps the current phase (the peer +-- is still mid-pipeline, just waiting for replies). In non-pipelined mode +-- a 'PeerDoNothing' transitions to 'PeerIdle'. +phaseForAction :: TxIdRequestMode -> PeerPhase -> PeerAction -> PeerPhase +phaseForAction txIdRequestMode currentPhase action = case action of + PeerDoNothing {} -> case txIdRequestMode of + AllowPipelinedTxIdRequests -> currentPhase + AllowAnyTxIdRequests -> PeerIdle + PeerSubmitTxs {} -> PeerSubmittingToMempool + PeerRequestTxs {} -> PeerWaitingTxs + PeerRequestTxIds {} -> PeerWaitingTxIds + -- | Pick which action to perform next. 
-- pickPeerActionChoice :: Ord peeraddr @@ -196,8 +216,7 @@ applySubmitChoice ctx txsToSubmit = ) -- | Construct a 'PeerRequestTxs' action and update local and shared tx state. -applyRequestTxsChoice :: Ord peeraddr - => PeerActionContext peeraddr txid tx +applyRequestTxsChoice :: PeerActionContext peeraddr txid tx -> [TxKey] -> SizeInBytes -> IntMap.IntMap (TxEntry peeraddr) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index 8fff64e9976..694d4fba8f7 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -387,10 +387,17 @@ emptyPeerScore scoreTs = PeerScore { -- | Per-peer protocol state. -- --- These are the pieces of state that naturally belong to the worker --- thread handling one peer. Shared arbitration state such as peer --- phase is kept separately in 'SharedPeerState'. +-- Owned by the worker thread for one peer. Includes the peer's coarse +-- protocol phase ('peerPhase'), which is purely peer-local: only this +-- peer's own scheduler reads and updates it, so it doesn't belong in +-- shared state. data PeerTxLocalState tx = PeerTxLocalState { + -- | Coarse phase of this peer's worker thread. Updated by + -- 'nextPeerAction' / 'nextPeerActionPipelined' when an action is + -- chosen and inspected in 'serverIdle' to decide whether to skip + -- 'awaitSharedChange' on an Active->Idle transition. + peerPhase :: !PeerPhase, + -- | Unacknowledged txids in the order advertised by the peer. 
peerUnacknowledgedTxIds :: !(StrictSeq TxKey), @@ -422,6 +429,7 @@ data PeerTxLocalState tx = PeerTxLocalState { emptyPeerTxLocalState :: PeerTxLocalState tx emptyPeerTxLocalState = PeerTxLocalState { + peerPhase = PeerIdle, peerUnacknowledgedTxIds = StrictSeq.empty, peerAvailableTxIds = IntMap.empty, peerRequestedTxs = IntSet.empty, @@ -436,7 +444,6 @@ emptyPeerTxLocalState = PeerTxLocalState { -- | Small shared view of peer state used for lease claiming and peer -- selection. data SharedPeerState = SharedPeerState { - sharedPeerPhase :: !PeerPhase, sharedPeerAdvertisedTxKeys :: !IntSet, sharedPeerGeneration :: !Word64 } diff --git a/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs b/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs index 358a74aae97..e71edcd020e 100644 --- a/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs +++ b/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs @@ -88,7 +88,7 @@ import Ouroboros.Network.Server.RateLimiting (AcceptConnectionsPolicyTrace (..), AcceptedConnectionsLimit (..)) import Ouroboros.Network.Snocket (LocalAddress (..), RemoteAddress) import Ouroboros.Network.TxSubmission.Inbound.V2.Types - (ProcessedTxCount (..), SharedPeerState (..), + (ProcessedTxCount (..), SharedTxState (..), TraceTxLogic (..), TraceTxSubmissionInbound (..), TxAttemptState (..), TxEntry (..), TxLease (..), TxSubmissionLogicVersion (..), retainedSize, @@ -1849,12 +1849,8 @@ traceSharedTxStateToJSON SharedTxState { | TxEntry { txAttempts } <- activeEntries ] - peerPhases = - Map.toList $ - Map.fromListWith (+) - [ (show sharedPeerPhase, 1 :: Int) - | SharedPeerState { sharedPeerPhase } <- Map.elems sharedPeers - ] + peerPhases :: [(String, Int)] + peerPhases = [] renderTxId txKey = maybe "" show (IntMap.lookup txKey sharedKeyToTxId) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs 
b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index ca3ecb604c9..39f22fc41a4 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -46,7 +46,6 @@ import NoThunks.Class (NoThunks, unsafeNoThunks) import Ouroboros.Network.Protocol.TxSubmission2.Type import Ouroboros.Network.Tx (HasRawTxId (..), RawTxId, getRawTxId) import Ouroboros.Network.TxSubmission.Inbound.V2.Policy -import Ouroboros.Network.TxSubmission.Inbound.V2.Registry (updatePeerPhase) import Ouroboros.Network.TxSubmission.Inbound.V2.State import Ouroboros.Network.TxSubmission.Inbound.V2.Types @@ -101,8 +100,6 @@ tests = , testProperty "handleSubmittedTxs bumps advertiser generations" prop_handleSubmittedTxs_bumpsAdvertisers , testCaseSteps "advertisingPeersForTxExcept scans the authoritative peer key sets" unit_advertisingPeersForTxExcept_scansAuthoritativePeerSets , testCaseSteps "removeAdvertisingPeersForResolvedTx clears all advertising peers for a resolved key" unit_removeAdvertisingPeersForResolvedTx_clearsAllAdvertisingPeers - , testCaseSteps "updatePeerPhase only wakes the peer becoming idle" unit_updatePeerPhase_wakesOnlyBecomingIdlePeer - , testCaseSteps "updatePeerPhase wakes competing idle advertisers when a peer leaves idle" unit_updatePeerPhase_wakesCompetingAdvertisers ] -- @@ -551,7 +548,7 @@ instance Arbitrary ArbSharedPeerState where { sharedPeerGeneration = 0 } ] where - defaultPeerState = mkSharedPeerState PeerIdle + defaultPeerState = mkSharedPeerState instance Arbitrary ArbPeerTxLocalState where arbitrary = ArbPeerTxLocalState <$> genPeerTxLocalState @@ -827,8 +824,6 @@ prop_handleReceivedTxIds [ counterexample "other peer's advertised keys not restored" (sharedPeerAdvertisedTxKeys post === sharedPeerAdvertisedTxKeys original) - , counterexample "other peer's phase changed unexpectedly" - (sharedPeerPhase post === sharedPeerPhase original) 
, counterexample "other peer's generation bump mismatch" (sharedPeerGeneration post === sharedPeerGeneration original + 1) @@ -996,8 +991,8 @@ unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull step = do k = unTxKey key sharedState0 = emptySharedTxState { sharedPeers = Map.fromList - [ (peerA, mkSharedPeerState PeerIdle) - , (peerB, mkSharedPeerState PeerIdle) + [ (peerA, mkSharedPeerState) + , (peerB, mkSharedPeerState) ] } peerAState0 = emptyPeerTxLocalState @@ -1309,8 +1304,6 @@ prop_handleReceivedTxs [ counterexample "other peer's advertised keys changed" (sharedPeerAdvertisedTxKeys post === sharedPeerAdvertisedTxKeys original) - , counterexample "other peer's phase changed" - (sharedPeerPhase post === sharedPeerPhase original) , counterexample "other peer's generation bump mismatch" (sharedPeerGeneration post === sharedPeerGeneration original + 1) @@ -1844,7 +1837,7 @@ buildTriggerState policy perPeer = mkSharedPeer _ ts = let advKeys = [ TxKey (txidKey (triggerTxid t)) | t <- ts, hasActiveEntry t ] in - withAdvertisedTxKeys advKeys (mkSharedPeerState PeerIdle) + withAdvertisedTxKeys advKeys (mkSharedPeerState) sharedPeers' = Map.mapWithKey mkSharedPeer perPeer @@ -2422,14 +2415,14 @@ prop_nextPeerAction_claimsClaimableTx ClaimableLease -> [] ExpiredLease -> [ ( oldOwner - , withAdvertisedTxKeys [key] (mkSharedPeerState PeerWaitingTxs) + , withAdvertisedTxKeys [key] (mkSharedPeerState) ) ] sharedState0 = emptySharedTxState { sharedPeers = Map.fromList - $ [ (goodPeer, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , (badPeer, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , (confPeer, mkSharedPeerState PeerIdle) + $ [ (goodPeer, withAdvertisedTxKeys [key] (mkSharedPeerState)) + , (badPeer, withAdvertisedTxKeys [key] (mkSharedPeerState)) + , (confPeer, mkSharedPeerState) ] ++ oldOwnerEntries , sharedTxIdToKey = Map.singleton (getRawTxId txid) key @@ -2538,7 +2531,7 @@ unit_nextPeerAction_claimsAtScoreDelayThreshold step = do 
sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr - (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + (withAdvertisedTxKeys [key] (mkSharedPeerState)) , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid , sharedNextTxKey = 1 @@ -2863,7 +2856,7 @@ prop_nextPeerAction_picksTxsRespectingBudget sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr - (withAdvertisedTxKeys keys (mkSharedPeerState PeerIdle)) + (withAdvertisedTxKeys keys (mkSharedPeerState)) , sharedTxIdToKey = Map.fromList [ (getRawTxId (txidFor i), TxKey i) | (i, _) <- indexed ] , sharedKeyToTxId = IntMap.fromList @@ -2938,9 +2931,9 @@ unit_nextPeerAction_skipsBlockedAvailableTxs step = do sharedState = emptySharedTxState { sharedPeers = Map.fromList [ (peeraddr, withAdvertisedTxKeys [blockedKey, claimableKey] - (mkSharedPeerState PeerIdle)) + (mkSharedPeerState)) , (otherPeer, withAdvertisedTxKeys [blockedKey] - (mkSharedPeerState PeerIdle)) + (mkSharedPeerState)) ] , sharedTxTable = IntMap.fromList [ (kBlocked, TxEntry @@ -2973,7 +2966,7 @@ prop_nextPeerAction_ownerSubmitsBuffered (ArbTxDecisionPolicy policy) (Positive PeerSubmitTxs [txKey] -> conjoin [ txKey === key - , peerState' === peerState0 + , peerState' === peerState0 { peerPhase = PeerSubmittingToMempool } -- Submit selection atomically marks the chosen tx as TxSubmitting -- so concurrent peer decisions exclude it. 
, sharedState' === markSubmittingTxs peeraddr [key] sharedState0 @@ -2988,7 +2981,7 @@ prop_nextPeerAction_ownerSubmitsBuffered (ArbTxDecisionPolicy policy) (Positive sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr - (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + (withAdvertisedTxKeys [key] (mkSharedPeerState)) , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxClaimable (Time 0) , txAdvertiserCount = 1 @@ -3048,9 +3041,9 @@ unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx step = do sharedState0 = emptySharedTxState { sharedPeers = Map.fromList [ (peeraddr, withAdvertisedTxKeys [blockedKey, claimableKey] - (mkSharedPeerState PeerIdle)) + (mkSharedPeerState)) , (submittingPeer, withAdvertisedTxKeys [blockedKey] - (mkSharedPeerState PeerSubmittingToMempool)) + (mkSharedPeerState)) ] , sharedTxTable = IntMap.fromList [ (kBlocked, blockedEntry) @@ -3123,9 +3116,9 @@ unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx step = do sharedState0 = emptySharedTxState { sharedPeers = Map.fromList [ (peeraddr, withAdvertisedTxKeys [blockedKey] - (mkSharedPeerState PeerIdle)) + (mkSharedPeerState)) , (submittingPeer, withAdvertisedTxKeys [blockedKey] - (mkSharedPeerState PeerSubmittingToMempool)) + (mkSharedPeerState)) ] , sharedTxTable = IntMap.singleton kBlocked blockedEntry , sharedRetainedTxs = retainedSingleton kResolved (addTime 17 now) @@ -3183,8 +3176,8 @@ prop_nextPeerAction_nonOwnerWaitsUntilResolved (ArbTxDecisionPolicy policy) (Pos key = TxKey 0 k = unTxKey key sharedPeers0 = Map.fromList - [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , (peeraddr, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) + [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState)) + , (peeraddr, withAdvertisedTxKeys [key] (mkSharedPeerState)) ] unresolvedSharedState = emptySharedTxState { sharedPeers = sharedPeers0 @@ -3244,7 +3237,7 @@ prop_nextPeerActionPipelined_requiresAckAndReq 
(ArbTxDecisionPolicy policy) (Pos , peerRequestedTxIds = maxNumTxIdsToRequest policy } sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState) , sharedRetainedTxs = retainedSingleton k (addTime 17 now) , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid @@ -3284,7 +3277,7 @@ prop_nextPeerActionPipelined_requestsTxIds (ArbTxDecisionPolicy policy) (Positiv , peerRequestedTxIds = 0 } sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState) , sharedRetainedTxs = retainedSingleton k (addTime 17 now) , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid @@ -3326,7 +3319,7 @@ unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply step = do , peerRequestedTxsSize = requestedTxBatchSize requestedBatch } sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState) , sharedRetainedTxs = retainedFromList [ (unTxKey keyA, addTime 17 now) @@ -3391,7 +3384,7 @@ prop_nextPeerActionPipelined_secondBodyBatch (ArbTxDecisionPolicy basePolicy) (P sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr - (withAdvertisedTxKeys [keyA, keyB] (mkSharedPeerState PeerIdle)) + (withAdvertisedTxKeys [keyA, keyB] (mkSharedPeerState)) , sharedTxTable = IntMap.fromList [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading) policy) , (kB, TxEntry @@ -3463,7 +3456,7 @@ prop_nextPeerActionPipelined_noThirdBodyBatch (ArbTxDecisionPolicy basePolicy) ( sharedState0 = emptySharedTxState { sharedPeers = Map.singleton peeraddr - (withAdvertisedTxKeys [keyA, keyB, keyC] (mkSharedPeerState PeerIdle)) + (withAdvertisedTxKeys [keyA, keyB, keyC] (mkSharedPeerState)) , 
sharedTxTable = IntMap.fromList [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading) policy) , (kB, mkTxEntry peeraddr txSizeB (Just TxDownloading) policy) @@ -3508,7 +3501,7 @@ prop_nextPeerAction_prunesExpiredRetained (ArbTxDecisionPolicy policy) (Positive idlePeerState :: PeerTxLocalState (Tx TxId) idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest policy } sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState) , sharedRetainedTxs = retainedSingleton k now , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid @@ -3548,7 +3541,7 @@ prop_nextPeerAction_keepsRetained (ArbTxDecisionPolicy policy) (Positive peeradd idlePeerState :: PeerTxLocalState (Tx TxId) idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest policy } sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState PeerIdle) + { sharedPeers = Map.singleton peeraddr (mkSharedPeerState) , sharedRetainedTxs = retainedSingleton k retainUntil , sharedTxIdToKey = Map.singleton (getRawTxId txid) key , sharedKeyToTxId = IntMap.singleton k txid @@ -3589,8 +3582,8 @@ prop_nextPeerAction_earliestWakeDelay (ArbTxDecisionPolicy policy) (Positive pee keyB = TxKey 1 idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest policy } sharedPeers0 = Map.fromList - [ (peeraddr, withAdvertisedTxKeys [keyA] (mkSharedPeerState PeerIdle)) - , (owner, withAdvertisedTxKeys [keyA] (mkSharedPeerState PeerWaitingTxs)) + [ (peeraddr, withAdvertisedTxKeys [keyA] (mkSharedPeerState)) + , (owner, withAdvertisedTxKeys [keyA] (mkSharedPeerState)) ] leaseFirstState = emptySharedTxState { sharedPeers = sharedPeers0 @@ -3637,12 +3630,12 @@ prop_nextPeerAction_returnsPeerGeneration (ArbTxDecisionPolicy policy) (Positive sharedState0 = emptySharedTxState { sharedPeers = 
Map.fromList [ ( peeraddr - , (mkSharedPeerState PeerIdle) + , (mkSharedPeerState) { sharedPeerGeneration = expectedGeneration } ) , ( peeraddr + 1000 - , (mkSharedPeerState PeerIdle) + , (mkSharedPeerState) { sharedPeerGeneration = 11 } ) @@ -3680,9 +3673,9 @@ prop_handleSubmittedTxs_bumpsAdvertisers (ArbTxDecisionPolicy policy) (Positive k = unTxKey key sharedState0 = emptySharedTxState { sharedPeers = Map.fromList - [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerSubmittingToMempool)) - , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerWaitingTxs)) + [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState)) + , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState)) + , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState)) ] , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxLeased owner (addTime 10 now) @@ -3717,10 +3710,10 @@ unit_advertisingPeersForTxExcept_scansAuthoritativePeerSets step = do k = unTxKey key sharedState0 = baseState { sharedPeers = Map.fromList - [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , (unrelatedPeer, mkSharedPeerState PeerIdle) + [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState)) + , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState)) + , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState)) + , (unrelatedPeer, mkSharedPeerState) ] , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxLeased owner (addTime 10 now) @@ -3757,10 +3750,10 @@ unit_removeAdvertisingPeersForResolvedTx_clearsAllAdvertisingPeers step = do k = unTxKey key sharedState0 = baseState { sharedPeers = Map.fromList - [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) - , (peerB, withAdvertisedTxKeys [key] 
(mkSharedPeerState PeerIdle)) - , (unrelatedPeer, mkSharedPeerState PeerIdle) + [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState)) + , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState)) + , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState)) + , (unrelatedPeer, mkSharedPeerState) ] , sharedTxTable = IntMap.singleton k TxEntry { txLease = TxLeased owner (addTime 10 now) @@ -3771,63 +3764,11 @@ unit_removeAdvertisingPeersForResolvedTx_clearsAllAdvertisingPeers step = do } } -unit_updatePeerPhase_wakesOnlyBecomingIdlePeer :: (String -> IO ()) -> Assertion -unit_updatePeerPhase_wakesOnlyBecomingIdlePeer step = do - step "Update a peer from waiting to idle" - sharedPeerPhase (lookupPeerOrFail peer sharedState') @?= PeerIdle - step "Assert only the becoming-idle peer generation changes" - sharedPeerGeneration (lookupPeerOrFail peer sharedState') @?= 6 - sharedPeerGeneration (lookupPeerOrFail other sharedState') @?= 11 - where - peer = 1 - other = 2 - sharedState0 = emptySharedTxState - { sharedPeers = Map.fromList - [ (peer, (mkSharedPeerState PeerWaitingTxs) { sharedPeerGeneration = 5 }) - , (other, (mkSharedPeerState PeerIdle) { sharedPeerGeneration = 11 }) - ] - } - sharedState' = updatePeerPhase peer PeerIdle sharedState0 - -unit_updatePeerPhase_wakesCompetingAdvertisers :: (String -> IO ()) -> Assertion -unit_updatePeerPhase_wakesCompetingAdvertisers step = do - step "Update an idle peer to a waiting phase" - sharedPeerPhase (lookupPeerOrFail leavingPeer sharedState') @?= PeerWaitingTxs - step "Assert no competing advertisers are woken by leaving idle under score-delay claiming" - sharedPeerGeneration (lookupPeerOrFail leavingPeer sharedState') @?= 5 - sharedPeerGeneration (lookupPeerOrFail competingPeer sharedState') @?= 11 - sharedPeerGeneration (lookupPeerOrFail unrelatedPeer sharedState') @?= 17 - where - leavingPeer = 1 - competingPeer = 2 - unrelatedPeer = 3 - txid = 1 - baseState = mkSharedState [txid] - key = lookupKeyOrFail txid baseState - 
k = unTxKey key - sharedState0 = baseState - { sharedPeers = Map.fromList - [ (leavingPeer, (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) { sharedPeerGeneration = 5 }) - , (competingPeer, (withAdvertisedTxKeys [key] (mkSharedPeerState PeerIdle)) { sharedPeerGeneration = 11 }) - , (unrelatedPeer, (mkSharedPeerState PeerIdle) { sharedPeerGeneration = 17 }) - ] - , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxClaimable (Time 0) - , txAdvertiserCount = 2 - , txAttempts = Map.empty - , currentMaxInflightMultiplicity = - txInflightMultiplicity defaultTxDecisionPolicy - } - } - sharedState' = updatePeerPhase leavingPeer PeerWaitingTxs sharedState0 - -- Generate a shared peer state. genSharedPeerState :: Gen SharedPeerState genSharedPeerState = do - sharedPeerPhase <- elements [PeerIdle, PeerWaitingTxIds, PeerWaitingTxs, PeerSubmittingToMempool] sharedPeerGeneration <- genSmallWord64 pure SharedPeerState { - sharedPeerPhase, sharedPeerAdvertisedTxKeys = IntSet.empty, sharedPeerGeneration } @@ -3879,6 +3820,7 @@ genPeerTxLocalState = sized $ \n -> do -- Generated peer states default to a zero score. 
let peerScoreValue = 0 pure PeerTxLocalState { + peerPhase = PeerIdle, peerUnacknowledgedTxIds, peerAvailableTxIds, peerRequestedTxs = requestedSet, @@ -4053,16 +3995,7 @@ deriveSharedPeers baseState peerSeeds activeEntries = Map.insertWith (\_ old -> old) peeraddr defaultPeerSeed acc buildPeerState peeraddr PeerSeed { peerSeedGeneration } = - let PeerDerivedUsage { - peerHasSubmitting, - peerHasRequestedTxs - } = Map.findWithDefault emptyPeerDerivedUsage peeraddr peerUsages - sharedPeerPhase - | peerHasSubmitting = PeerSubmittingToMempool - | peerHasRequestedTxs = PeerWaitingTxs - | otherwise = PeerIdle in SharedPeerState { - sharedPeerPhase, sharedPeerAdvertisedTxKeys = Map.findWithDefault IntSet.empty peeraddr peerAdvertisedKeys, sharedPeerGeneration = peerSeedGeneration @@ -4305,10 +4238,9 @@ mkTx txid txSize = Tx } -- Construct a peer fixture with zeroed generation. -mkSharedPeerState :: PeerPhase -> SharedPeerState -mkSharedPeerState sharedPeerPhase = +mkSharedPeerState :: SharedPeerState +mkSharedPeerState = SharedPeerState { - sharedPeerPhase, sharedPeerAdvertisedTxKeys = IntSet.empty, sharedPeerGeneration = 0 } @@ -4333,7 +4265,7 @@ ensurePeerAdvertisesTxKeys peeraddr txKeys st@SharedTxState { sharedPeers } = advertisedKeys = IntSet.fromList (map unTxKey txKeys) updatePeer Nothing = - Just (withAdvertisedTxKeys txKeys (mkSharedPeerState PeerIdle)) + Just (withAdvertisedTxKeys txKeys (mkSharedPeerState)) updatePeer (Just sharedPeerState) = Just (sharedPeerState { @@ -4717,7 +4649,7 @@ mkActiveSharedState allPeers ownerPeer resolvedAdvertisers txidsAndSizes = ] , sharedPeers = Map.fromList - [ (peeraddr, (mkSharedPeerState PeerIdle) { + [ (peeraddr, (mkSharedPeerState) { sharedPeerAdvertisedTxKeys = Map.findWithDefault IntSet.empty peeraddr peerAdvertisedKeys }) diff --git a/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs b/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs index 7fbdf4d2ad4..a9fd34d97f8 100644 --- 
a/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs +++ b/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs @@ -54,12 +54,8 @@ instance (Show txid, Show peeraddr) => LogFormatting (TraceTxLogic peeraddr txid | TxEntry { txAttempts = txAttempts' } <- activeEntries ] - peerPhases = - Map.toList $ - Map.fromListWith (+) - [ (show sharedPeerPhase', 1 :: Int) - | SharedPeerState { sharedPeerPhase = sharedPeerPhase' } <- Map.elems sharedPeers - ] + peerPhases :: [(String, Int)] + peerPhases = [] renderTxId txKey = maybe "" show (IntMap.lookup txKey sharedKeyToTxId) From bc1c97f305263b3899c2791b4df3e2a28f945529 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Sat, 2 May 2026 04:04:24 +0200 Subject: [PATCH 61/67] fixup: IntSet for retained lookups Use IntSet for faster lookups for retained keys. IntPSQ is still needed for quick removal of expeired entries. --- .../Network/TxSubmission/Inbound/V2/Types.hs | 64 +++++++++++-------- 1 file changed, 39 insertions(+), 25 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index 694d4fba8f7..5d12448504b 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -470,7 +470,19 @@ data SharedTxState peeraddr txid = SharedTxState { deriving stock (Eq, Show, Generic) deriving anyclass (NFData, NoThunks) -type RetainedTxs = IntPSQ Time () +-- | Retained tx-key set with two indexes: +-- +-- * 'retainedQueue' is keyed on 'Time' for cheap earliest-expiry queries. +-- * 'retainedSet' shadows the keys for O(min(n, W)) 'retainedMember' +-- queries (the hot path: every received txid is checked here). +-- +-- Both are kept in lockstep by the 'retained*' helpers below. 
+data RetainedTxs = RetainedTxs { + retainedQueue :: !(IntPSQ Time ()), + retainedSet :: !IntSet + } + deriving stock (Eq, Show, Generic) + deriving anyclass (NFData, NoThunks) emptySharedTxState :: SharedTxState peeraddr txid emptySharedTxState = SharedTxState { @@ -484,11 +496,12 @@ emptySharedTxState = SharedTxState { } retainedEmpty :: RetainedTxs -retainedEmpty = IntPSQ.empty +retainedEmpty = RetainedTxs IntPSQ.empty IntSet.empty retainedSingleton :: Int -> Time -> RetainedTxs retainedSingleton k retainUntil = - IntPSQ.insert k retainUntil () retainedEmpty + RetainedTxs (IntPSQ.insert k retainUntil () IntPSQ.empty) + (IntSet.singleton k) retainedFromList :: [(Int, Time)] -> RetainedTxs retainedFromList = @@ -499,55 +512,55 @@ retainedToList = sortOn fst . fmap (\(k, retainUntil, ()) -> (k, retainUntil)) . IntPSQ.toList + . retainedQueue retainedSize :: RetainedTxs -> Int -retainedSize = IntPSQ.size +retainedSize = IntSet.size . retainedSet {-# INLINE retainedSize #-} retainedLookup :: Int -> RetainedTxs -> Maybe Time retainedLookup k retained = - fmap fst (IntPSQ.lookup k retained) + fmap fst (IntPSQ.lookup k (retainedQueue retained)) {-# INLINE retainedLookup #-} retainedMember :: Int -> RetainedTxs -> Bool -retainedMember k retained = - case IntPSQ.lookup k retained of - Just _ -> True - Nothing -> False +retainedMember k = IntSet.member k . 
retainedSet {-# INLINE retainedMember #-} retainedInsertMax :: Int -> Time -> RetainedTxs -> RetainedTxs -retainedInsertMax k retainUntil retained = - IntPSQ.insert k retainUntil' () retained +retainedInsertMax k retainUntil (RetainedTxs queue keys) = + RetainedTxs (IntPSQ.insert k retainUntil' () queue) + (IntSet.insert k keys) where retainUntil' = - case retainedLookup k retained of - Just existing -> max existing retainUntil - Nothing -> retainUntil + case IntPSQ.lookup k queue of + Just (existing, ()) -> max existing retainUntil + Nothing -> retainUntil {-# INLINE retainedInsertMax #-} retainedDeleteKeys :: IntSet -> RetainedTxs -> RetainedTxs -retainedDeleteKeys keys retained = - IntSet.foldl' (flip IntPSQ.delete) retained keys +retainedDeleteKeys ks (RetainedTxs queue keys) = + RetainedTxs (IntSet.foldl' (flip IntPSQ.delete) queue ks) + (keys `IntSet.difference` ks) {-# INLINE retainedDeleteKeys #-} retainedKeysSet :: RetainedTxs -> IntSet -retainedKeysSet = - IntPSQ.fold' (\k _ _ acc -> IntSet.insert k acc) IntSet.empty +retainedKeysSet = retainedSet {-# INLINE retainedKeysSet #-} retainedRestrictKeys :: RetainedTxs -> IntSet -> RetainedTxs -retainedRestrictKeys retained keys = - IntPSQ.fold' keep retainedEmpty retained +retainedRestrictKeys (RetainedTxs queue keys) ks = + RetainedTxs (IntPSQ.fold' keep IntPSQ.empty queue) + (IntSet.intersection keys ks) where keep k retainUntil _ - | IntSet.member k keys = IntPSQ.insert k retainUntil () - | otherwise = id + | IntSet.member k ks = IntPSQ.insert k retainUntil () + | otherwise = id {-# INLINE retainedRestrictKeys #-} retainedNextWake :: Time -> RetainedTxs -> Maybe Time retainedNextWake currentTime = - go + go . retainedQueue where go retained = case IntPSQ.minView retained of @@ -561,10 +574,11 @@ retainedNextWake currentTime = retainedExpiredKeys :: Time -> RetainedTxs -> IntSet retainedExpiredKeys currentTime retained = -- Quick exit if no TX has expired. 
- case IntPSQ.findMin retained of - Just (_, earliest, _) | earliest <= currentTime -> go IntSet.empty retained + case IntPSQ.findMin queue of + Just (_, earliest, _) | earliest <= currentTime -> go IntSet.empty queue _ -> IntSet.empty where + queue = retainedQueue retained go expired r = case IntPSQ.minView r of Just (k, retainUntil, (), r') From 8c15a23b365246590cc71702b18706fd3b12415d Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Mon, 4 May 2026 13:33:46 +0200 Subject: [PATCH 62/67] fixup: Reduce STM contention in V2 TxSubmission Reduce STM contention in V2 TxSubmission inbound by splitting per-peer in-flight bookkeeping out of SharedTxState into a small per-peer PeerTxInFlight TVar. SharedTxState is now only written when the shared state updates. The common case of a new peer advertising an existing txid is just a read operation for the shared state and a write operation into the peer local TVar. --- .../Test/Cardano/Network/Diffusion/Testnet.hs | 7 +- .../Diffusion/Testnet/MiniProtocols.hs | 10 +- .../bench/Bench/TxSubmissionV2Server.hs | 63 +- .../Network/TxSubmission/Inbound/V2/Policy.hs | 4 +- .../TxSubmission/Inbound/V2/Registry.hs | 394 +- .../Network/TxSubmission/Inbound/V2/State.hs | 942 +-- .../Network/TxSubmission/Inbound/V2/Types.hs | 142 +- .../Ouroboros/Network/OrphanInstances.hs | 36 +- .../Network/Diffusion/Node/Kernel.hs | 11 +- .../Ouroboros/Network/TxSubmission/AppV2.hs | 2 + .../Ouroboros/Network/TxSubmission/TxLogic.hs | 5634 ++++++----------- .../Ouroboros/Network/Tracing/TxSubmission.hs | 35 +- 12 files changed, 2731 insertions(+), 4549 deletions(-) diff --git a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs index 4819585197d..7b83621e1aa 100644 --- a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs +++ b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet.hs @@ -1580,12 +1580,7 @@ 
prop_check_inflight_ratio bi ds@(DiffusionScript simArgs _ _) = ] activeAttemptCount :: TxEntry peeraddr -> Int - activeAttemptCount TxEntry { txAttempts } = - length - [ () - | attempt <- Map.elems txAttempts - , attempt == TxDownloading || attempt == TxBuffered - ] + activeAttemptCount TxEntry { txAttempt } = txAttempt in tabulate "Max observeed ratio of inflight multiplicity by the max stipulated by the policy" (map (\m -> "has " ++ show m ++ " in flight - ratio: " diff --git a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs index 34657f7414c..e102f102b98 100644 --- a/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs +++ b/cardano-diffusion/tests/lib/Test/Cardano/Network/Diffusion/Testnet/MiniProtocols.hs @@ -108,7 +108,8 @@ import Ouroboros.Network.RethrowPolicy import Ouroboros.Network.TxSubmission.Inbound.V2 (TxSubmissionInitDelay (..), txSubmissionInboundV2) import Ouroboros.Network.TxSubmission.Inbound.V2.Policy (TxDecisionPolicy (..)) -import Ouroboros.Network.TxSubmission.Inbound.V2.Registry (SharedTxStateVar, +import Ouroboros.Network.TxSubmission.Inbound.V2.Registry + (PeerTxInFlightRegistry, SharedTxStateVar, TxSubmissionCountersVar, withPeer) import Ouroboros.Network.TxSubmission.Inbound.V2.Types (TraceTxLogic, TraceTxSubmissionInbound) @@ -391,7 +392,8 @@ applications debugTracer txSubmissionInboundTracer _txSubmissionInboundDebug nod (txSubmissionInitiator aaTxDecisionPolicy (nkMempool nodeKernel)) (txSubmissionResponder (nkMempool nodeKernel) (nkTxCountersVar nodeKernel) - (nkSharedTxStateVar nodeKernel)) + (nkSharedTxStateVar nodeKernel) + (nkPeerTxInFlightRegistry nodeKernel)) } ] , withWarm = WithWarm @@ -725,14 +727,16 @@ applications debugTracer txSubmissionInboundTracer _txSubmissionInboundDebug nod :: Mempool m TxId (Tx TxId) -> TxSubmissionCountersVar m -> SharedTxStateVar m NtNAddr Int 
+ -> PeerTxInFlightRegistry m NtNAddr -> MiniProtocolCb (ResponderContext NtNAddr) ByteString m () - txSubmissionResponder mempool txCountersVar sharedTxStateVar = + txSubmissionResponder mempool txCountersVar sharedTxStateVar inFlightRegistry = MiniProtocolCb $ \ ResponderContext { rcConnectionId = connId@ConnectionId { remoteAddress = them }} channel -> do withPeer aaTxDecisionPolicy (getMempoolReader mempool) sharedTxStateVar + inFlightRegistry txCountersVar them $ \api -> do let server = txSubmissionInboundV2 diff --git a/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs b/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs index c1ea4134e7c..1160b7565d9 100644 --- a/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs +++ b/ouroboros-network/bench/Bench/TxSubmissionV2Server.hs @@ -31,7 +31,8 @@ import Ouroboros.Network.Protocol.TxSubmission2.Type import Ouroboros.Network.TxSubmission.Inbound.V2 (TxSubmissionInitDelay (NoTxSubmissionInitDelay), defaultTxDecisionPolicy, txSubmissionInboundV2) -import Ouroboros.Network.TxSubmission.Inbound.V2.Registry (newSharedTxStateVar, +import Ouroboros.Network.TxSubmission.Inbound.V2.Registry + (newPeerTxInFlightRegistry, newSharedTxStateVar, newTxSubmissionCountersVar, withPeer) import Ouroboros.Network.TxSubmission.Inbound.V2.Types (TxSubmissionCounters, emptySharedTxState) @@ -94,8 +95,7 @@ mkMultiPeerFixture peers batches = runDirectServerBenchmark :: DirectServerFixture -> IO DirectServerResult -runDirectServerBenchmark - DirectServerFixture { +runDirectServerBenchmark DirectServerFixture { dsPeerCount, dsTxIdReplyBatches, dsTxSize @@ -103,13 +103,17 @@ runDirectServerBenchmark inboundMempool <- emptyMempool duplicateTxIdsVar <- Lazy.newTVarIO [] sharedStateVar <- newSharedTxStateVar emptySharedTxState + inFlightRegistry <- newPeerTxInFlightRegistry countersVar <- newTxSubmissionCountersVar mempty - let runPeer addr = + let writer = getMempoolWriter duplicateTxIdsVar inboundMempool + + runPeer addr = withPeer 
defaultTxDecisionPolicy (getMempoolReader inboundMempool) sharedStateVar + inFlightRegistry countersVar addr $ \api -> do @@ -118,17 +122,15 @@ runDirectServerBenchmark nullTracer NoTxSubmissionInitDelay defaultTxDecisionPolicy - (getMempoolWriter duplicateTxIdsVar inboundMempool) + writer getTxSize api + stream = [1 .. dsTxIdReplyBatches * 6] case server of TxSubmissionServerPipelined initServer -> do st0 <- initServer - driveServer dsTxSize dsTxIdReplyBatches 1 [] st0 + driveServer dsTxSize stream [] st0 - -- Spawn one async per peer. Peers race for tx leases, contend on - -- the shared STM state, and exercise the scheduler the same way - -- they do on mainnet. mapConcurrently_ runPeer [1 .. dsPeerCount] (DirectServerResult @@ -138,36 +140,30 @@ runDirectServerBenchmark driveServer :: SizeInBytes - -> Int - -> TxId + -> [TxId] -> [PendingReply] -> ServerStIdle n TxId (Tx TxId) IO () -> IO () -driveServer !txSize !remainingBatches !nextTxId !pending = +driveServer !txSize !stream !pending = \case SendMsgRequestTxIdsBlocking _ req kDone k - | remainingBatches <= 0 -> kDone + | null stream -> kDone | otherwise -> do - let (txids, nextTxId') = mkTxIdReply txSize nextTxId req + let (txids, stream') = takeReply txSize req stream st' <- k (NonEmpty.fromList txids) - driveServer txSize (remainingBatches - 1) nextTxId' pending st' + driveServer txSize stream' pending st' SendMsgRequestTxIdsPipelined _ req k -> do - let (txids, nextTxId', remainingBatches') = - if remainingBatches <= 0 - then ([], nextTxId, remainingBatches) - else let (txids', nextTxId'') = mkTxIdReply txSize nextTxId req - in (txids', nextTxId'', remainingBatches - 1) + let (txids, stream') = takeReply txSize req stream pending' = pending ++ [PendingTxIds req txids] st' <- k - driveServer txSize remainingBatches' nextTxId' pending' st' + driveServer txSize stream' pending' st' SendMsgRequestTxsPipelined requested k -> do st' <- k driveServer txSize - remainingBatches - nextTxId + stream (pending ++ 
[PendingTxs requested]) st' @@ -175,27 +171,26 @@ driveServer !txSize !remainingBatches !nextTxId !pending = case pending of reply : pending' -> do st' <- collect (renderPendingReply reply) - driveServer txSize remainingBatches nextTxId pending' st' + driveServer txSize stream pending' st' [] -> case mNone of - Just k -> k >>= driveServer txSize remainingBatches nextTxId [] + Just k -> k >>= driveServer txSize stream [] Nothing -> error $ "TxSubmissionV2 direct benchmark: unexpected " ++ "CollectPipelined with no pending replies" -mkTxIdReply +-- | Take the next @req@-sized chunk from the precomputed txid stream. If +-- the stream is exhausted, return an empty reply and the empty stream. +takeReply :: SizeInBytes - -> TxId -> NumTxIdsToReq - -> ([(TxId, SizeInBytes)], TxId) -mkTxIdReply txSize nextTxId req = - ( [ (txid, txSize) - | txid <- [nextTxId .. nextTxId + replyCount - 1] - ] - , nextTxId + replyCount - ) + -> [TxId] + -> ([(TxId, SizeInBytes)], [TxId]) +takeReply txSize req stream = + let (taken, rest) = splitAt replyCount stream + in ([ (t, txSize) | t <- taken ], rest) where replyCount = fromIntegral (getNumTxIdsToReq req) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs index 60de4951bcd..b912adca4a9 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Policy.hs @@ -65,8 +65,8 @@ data TxDecisionPolicy = TxDecisionPolicy { -- ^ space between actual requests for the same TX. inflightTimeout :: !DiffTime - -- ^ Maximum time a peer's attempt may sit between claim and the - -- TxSubmitting state before the per-entry inflight-multiplicity + -- ^ Maximum time a peer's attempt may sit between claim and + -- entering submission before the per-entry inflight-multiplicity -- cap is bumped, allowing another peer to attempt in parallel. 
} deriving (Eq, Show) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index 807b3c828fb..99314477064 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -4,9 +4,11 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.Registry ( SharedTxStateVar + , PeerTxInFlightRegistry , PeerTxAPI (..) , TxSubmissionCountersVar , newSharedTxStateVar + , newPeerTxInFlightRegistry , newTxSubmissionCountersVar , txCountersThreadV2 , withPeer @@ -20,6 +22,7 @@ import Control.Monad.Class.MonadTime.SI import Control.Monad.Class.MonadTimer.SI import Control.Tracer (Tracer, traceWith) import Data.IntMap.Strict qualified as IntMap +import Data.IntSet (IntSet) import Data.IntSet qualified as IntSet import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map @@ -39,6 +42,18 @@ type SharedTxStateVar m peeraddr txid = StrictTVar m (SharedTxState peeraddr txi -- | STM handle for V2 monotonic counters. type TxSubmissionCountersVar m = StrictTVar m TxSubmissionCounters +-- | Per-peer in-flight TVar. +type PeerTxInFlightVar m = StrictTVar m PeerTxInFlight + +-- | Registry of every live peer's 'PeerTxInFlightVar'. +-- +-- 'withPeer' adds the peer's TVar on bracket-enter and removes it on +-- bracket-exit (after scrubbing any contributions the peer still has +-- to shared state). The sweep snapshots this map to compute the +-- @liveAdvertised@ union without touching peer-local protocol state. 
+type PeerTxInFlightRegistry m peeraddr = + StrictTVar m (Map peeraddr (PeerTxInFlightVar m)) + newSharedTxStateVar :: MonadSTM m => SharedTxState peeraddr txid @@ -51,12 +66,21 @@ newTxSubmissionCountersVar -> m (TxSubmissionCountersVar m) newTxSubmissionCountersVar = newTVarIO +newPeerTxInFlightRegistry + :: MonadSTM m + => m (PeerTxInFlightRegistry m peeraddr) +newPeerTxInFlightRegistry = newTVarIO Map.empty + -- | Central bookkeeping thread for V2. -- --- Wakes every @'bufferedTxsMinLifetime' policy \/ 4@ seconds to run --- 'State.sweepSharedState' on the shared tx state (retention expiry + orphan --- GC). On a slower cadence (every 'countersInterval' seconds of elapsed time) --- it also emits the current counters when they differ from the last emission. +-- Wakes every @'bufferedTxsMinLifetime' policy \/ 4@ seconds (capped +-- between 100 ms and 1 s) to run 'State.sweepSharedState' on the +-- shared tx state. The sweep needs the union of every live peer's +-- 'pifAdvertised' to decide which entries are still wanted; the +-- registry is read inside the same STM transaction as the sweep so +-- the snapshot is coherent. On a slower cadence (every +-- 'countersInterval' seconds of elapsed time) it also emits the +-- current counters when they differ from the last emission. txCountersThreadV2 :: forall m peeraddr txid. 
(MonadDelay m, MonadSTM m, HasRawTxId txid) @@ -64,13 +88,14 @@ txCountersThreadV2 -> Tracer m TxSubmissionCounters -> TxSubmissionCountersVar m -> SharedTxStateVar m peeraddr txid + -> PeerTxInFlightRegistry m peeraddr -> m Void -txCountersThreadV2 policy tracer countersVar sharedStateVar = do +txCountersThreadV2 policy tracer countersVar sharedStateVar registry = do now <- getMonotonicTime go mempty (addTime countersInterval now) where sweepInterval :: DiffTime - sweepInterval = min 1 (bufferedTxsMinLifetime policy / 4) + sweepInterval = max 0.1 (min 1 (bufferedTxsMinLifetime policy / 4)) countersInterval :: DiffTime countersInterval = 7 @@ -78,7 +103,9 @@ txCountersThreadV2 policy tracer countersVar sharedStateVar = do go !previous !nextEmitAt = do threadDelay sweepInterval now <- getMonotonicTime - atomically $ modifyTVar sharedStateVar (State.sweepSharedState now) + atomically $ do + liveAdvertised <- snapshotLiveAdvertised registry + modifyTVar sharedStateVar (State.sweepSharedState now liveAdvertised) if now >= nextEmitAt then do current <- readTVarIO countersVar @@ -86,13 +113,29 @@ txCountersThreadV2 policy tracer countersVar sharedStateVar = do go current (addTime countersInterval now) else go previous nextEmitAt +-- | Read every live peer's 'pifAdvertised' and union them. +snapshotLiveAdvertised + :: MonadSTM m + => PeerTxInFlightRegistry m peeraddr + -> STM m IntSet +snapshotLiveAdvertised registry = do + peers <- readTVar registry + foldr step (pure IntSet.empty) (Map.elems peers) + where + step var k = do + pif <- readTVar var + acc <- k + pure $! IntSet.union (pifAdvertised pif) acc + -- | Peer-facing coordination API. -- --- The peer thread keeps its local protocol state in an local --- variable. Registry helpers operate only on the shared STM state; any helper --- that needs peer-local state should take it explicitly as an argument. +-- The peer thread keeps its local protocol state in a local +-- variable. 
Registry helpers operate only on the shared STM state +-- and the per-peer 'PeerTxInFlight' TVar (closure-captured); any +-- helper that needs peer-local state should take it explicitly as an +-- argument. data PeerTxAPI m txid tx = PeerTxAPI { - -- | Wait until either the peer's generation changes from the given + -- | Wait until either 'sharedGeneration' moves past the given -- value or the optional timeout expires. awaitSharedChange :: Word64 -> Maybe DiffTime @@ -157,132 +200,123 @@ withPeer => TxDecisionPolicy -> TxSubmissionMempoolReader txid tx idx m -> SharedTxStateVar m peeraddr txid + -> PeerTxInFlightRegistry m peeraddr -> TxSubmissionCountersVar m -> peeraddr -> (PeerTxAPI m txid tx -> m a) -> m a -withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } sharedStateVar countersVar peeraddr io = - bracket - (do - atomically $ modifyTVar sharedStateVar registerPeer - pure PeerTxAPI { - awaitSharedChange = awaitSharedChangeImp sharedStateVar peeraddr - , runNextPeerAction = runNextPeerActionImp policy sharedStateVar countersVar peeraddr - , runNextPeerActionPipelined = runNextPeerActionPipelinedImp policy sharedStateVar - countersVar peeraddr - , applyReceivedTxIds = applyReceivedTxIdsImp policy mempoolGetSnapshot sharedStateVar - countersVar peeraddr - , applyReceivedTxs = applyReceivedTxsImp policy mempoolGetSnapshot sharedStateVar - countersVar peeraddr - , applySubmittedTxs = applySubmittedTxsImp policy sharedStateVar countersVar peeraddr - , resolveTxRequest = resolveTxRequestImp sharedStateVar - , resolveBufferedTxs = resolveBufferedTxsImp sharedStateVar - , addCounters = \delta -> atomically $ modifyTVar countersVar (<> delta) - } - ) - (\_ -> do - now <- getMonotonicTime - atomically $ modifyTVar sharedStateVar (unregisterPeer now)) - io +withPeer policy TxSubmissionMempoolReader { mempoolGetSnapshot } + sharedStateVar registry countersVar peeraddr io = + bracket acquire release run where - registerPeer :: SharedTxState peeraddr txid -> 
SharedTxState peeraddr txid - registerPeer st@SharedTxState { sharedPeers, sharedGeneration } = - st { - sharedPeers = Map.insert peeraddr sharedPeerState sharedPeers, - sharedGeneration = sharedGeneration + 1 - } - where - sharedPeerState = SharedPeerState { - sharedPeerAdvertisedTxKeys = IntSet.empty, - sharedPeerGeneration = 0 - } - - unregisterPeer :: Time -> SharedTxState peeraddr txid -> SharedTxState peeraddr txid - unregisterPeer now st@SharedTxState { sharedPeers, sharedTxTable, sharedRetainedTxs, sharedTxIdToKey, sharedKeyToTxId, sharedGeneration } = - bumpPeerGenerations peersToWake $ st { - sharedPeers = sharedPeers', - sharedTxTable = sharedTxTable', - sharedRetainedTxs = sharedRetainedTxs, - sharedTxIdToKey = Map.filter (\txKey -> IntSet.member (unTxKey txKey) liveKeys) sharedTxIdToKey, - sharedKeyToTxId = IntMap.restrictKeys sharedKeyToTxId liveKeys, - sharedGeneration = sharedGeneration + 1 + acquire = do + peerInFlightVar <- newTVarIO emptyPeerTxInFlight + atomically $ modifyTVar registry (Map.insert peeraddr peerInFlightVar) + pure peerInFlightVar + + release peerInFlightVar = do + now <- getMonotonicTime + atomically $ do + pif <- readTVar peerInFlightVar + modifyTVar sharedStateVar (scrubFromPeerInFlight peeraddr now pif) + modifyTVar registry (Map.delete peeraddr) + + run peerInFlightVar = io PeerTxAPI { + awaitSharedChange = awaitSharedChangeImp sharedStateVar + , runNextPeerAction = runNextPeerActionImp policy sharedStateVar + peerInFlightVar countersVar peeraddr + , runNextPeerActionPipelined = runNextPeerActionPipelinedImp policy + sharedStateVar peerInFlightVar + countersVar peeraddr + , applyReceivedTxIds = applyReceivedTxIdsImp policy mempoolGetSnapshot + sharedStateVar peerInFlightVar countersVar + , applyReceivedTxs = applyReceivedTxsImp policy mempoolGetSnapshot + sharedStateVar peerInFlightVar countersVar peeraddr + , applySubmittedTxs = applySubmittedTxsImp policy sharedStateVar + peerInFlightVar countersVar peeraddr + , 
resolveTxRequest = resolveTxRequestImp sharedStateVar + , resolveBufferedTxs = resolveBufferedTxsImp sharedStateVar + , addCounters = \delta -> atomically $ modifyTVar countersVar (<> delta) + } + +-- | Reverse this peer's still-outstanding contributions to the shared +-- 'TxEntry' counters. Run by the bracket finalizer; uses the per-peer +-- TVar snapshot taken at exit time. +-- +-- 'pifLeased' is best-effort (another peer can steal the lease in the +-- meantime), so the lease release verifies the entry still names this +-- peer as owner before claiming. +scrubFromPeerInFlight + :: Eq peeraddr + => peeraddr + -> Time + -> PeerTxInFlight + -> SharedTxState peeraddr txid + -> SharedTxState peeraddr txid +scrubFromPeerInFlight peeraddr now pif st + | nothingToDo = st + | otherwise = st { + sharedTxTable = sharedTxTable', + sharedGeneration = sharedGeneration st + 1 } - where - leavingAdvertisedKeys = - maybe IntSet.empty sharedPeerAdvertisedTxKeys (Map.lookup peeraddr sharedPeers) - sharedPeers' = Map.delete peeraddr sharedPeers - - scanState = - st { sharedPeers = sharedPeers' } - - (sharedTxTable', wakeKeys) = - IntSet.foldl' scrubOne (sharedTxTable, IntSet.empty) leavingAdvertisedKeys - peersToWake = - State.advertisingPeersForTxKeysExcept peeraddr wakeKeys scanState - liveKeys = IntMap.keysSet sharedTxTable' `IntSet.union` retainedKeysSet sharedRetainedTxs - - scrubOne (txTableAcc, wakeKeysAcc) k = - case IntMap.lookup k txTableAcc of - Just txEntry -> - let txEntry' = scrubTxEntry txEntry - txTableAcc' = - if txLive txEntry' - then IntMap.insert k txEntry' txTableAcc - else IntMap.delete k txTableAcc - wakeKeysAcc' = - if txLive txEntry' - then IntSet.insert k wakeKeysAcc - else wakeKeysAcc - in (txTableAcc', wakeKeysAcc') - Nothing -> - (txTableAcc, wakeKeysAcc) - - scrubTxEntry txEntry@TxEntry { txLease, txAdvertiserCount, txAttempts } = - txEntry { - txLease = scrubLease txLease, - txAdvertiserCount = txAdvertiserCount - 1, - txAttempts = Map.delete peeraddr 
txAttempts - } - - scrubLease (TxLeased owner leaseUntil) - | owner == peeraddr = TxClaimable now - | otherwise = TxLeased owner leaseUntil - scrubLease claimable@TxClaimable {} = claimable - - txLive TxEntry { txLease, txAdvertiserCount, txAttempts } = - leaseLive txLease - || txAdvertiserCount > 0 - || not (Map.null txAttempts) - - leaseLive TxClaimable {} = False - leaseLive (TxLeased _ _) = True - --- | Wait until either the peer's generation changes from the given --- value or the optional timeout expires. + where + nothingToDo = + IntSet.null (pifLeased pif) + && IntSet.null (pifAttempting pif) + && IntSet.null (pifSubmitting pif) + + sharedTxTable' = + IntSet.foldl' clearSubmission + (IntSet.foldl' decAttempt + (IntSet.foldl' releaseLease (sharedTxTable st) + (pifLeased pif)) + (pifAttempting pif)) + (pifSubmitting pif) + + releaseLease tbl k = + IntMap.adjust + (\entry -> case txLease entry of + TxLeased owner _ | owner == peeraddr -> + entry { txLease = TxClaimable now } + _ -> entry) + k tbl + + decAttempt tbl k = + IntMap.adjust + (\entry -> entry { txAttempt = max 0 (txAttempt entry - 1) }) + k tbl + + clearSubmission tbl k = + IntMap.adjust + (\entry -> entry { txInSubmission = False }) + k tbl + +-- | Wait until either 'sharedGeneration' moves past the given value or the +-- optional timeout expires. -- --- Used by idle peers to avoid busy-waiting while still being woken when relevant cross-peer --- state (such as lease expiries or new tx advertisements) changes. -awaitSharedChangeImp :: ( MonadTimer m - , Ord peeraddr ) +-- Used by idle peers to avoid busy-waiting while still being woken when +-- shared state changes (lease expiries, new tx advertisements, mempool +-- resolutions). A spurious wake on a change that doesn't grant this peer +-- new work is harmless: the peer immediately re-runs 'nextPeerAction', +-- selects 'PeerDoNothing' again, and goes back to sleep on the new +-- generation value. 
+awaitSharedChangeImp :: MonadTimer m => SharedTxStateVar m peeraddr txid - -> peeraddr -> Word64 -> Maybe DiffTime -> m () -awaitSharedChangeImp sharedStateVar peeraddr generation mDelay = +awaitSharedChangeImp sharedStateVar generation mDelay = case mDelay of Nothing -> atomically $ do sharedState <- readTVar sharedStateVar - let generation' = peerGenerationOf peeraddr sharedState - check (generation' /= generation) + check (sharedGeneration sharedState /= generation) Just delay -> do delayVar <- registerDelay delay atomically $ do sharedState <- readTVar sharedStateVar - let generation' = peerGenerationOf peeraddr sharedState expired <- Lazy.readTVar delayVar - check (generation' /= generation || expired) + check (sharedGeneration sharedState /= generation || expired) -- | Avoid rewriting the shared TVar when the pure state step made no shared -- change. Callers use 'sharedGeneration' as the dirty bit for shared state. @@ -295,6 +329,16 @@ writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' | sharedGeneration sharedState' == sharedGeneration0 = pure () | otherwise = writeTVar sharedStateVar sharedState' +-- | Avoid rewriting the per-peer TVar when nothing changed. +writePeerInFlightIfChanged :: MonadSTM m + => PeerTxInFlightVar m + -> PeerTxInFlight + -> PeerTxInFlight + -> STM m () +writePeerInFlightIfChanged var before after + | before == after = pure () + | otherwise = writeTVar var after + -- | Update the counters for the action chosen by the peer scheduler. -- updateCountersForAction :: MonadSTM m @@ -321,111 +365,107 @@ updateCountersForAction countersVar peerAction = _ -> pure () -- | Compute the next action for this peer in non-pipelined mode. --- --- Returns the selected 'PeerAction', an updated peer-local state, and applies --- changes to shared state (such as lease/advertiser coordination). --- Called from the main peer loop when not handling pipelined replies. 
runNextPeerActionImp :: ( MonadSTM m - , Ord peeraddr - , HasRawTxId txid ) + , Ord peeraddr ) => TxDecisionPolicy -> SharedTxStateVar m peeraddr txid + -> PeerTxInFlightVar m -> TxSubmissionCountersVar m -> peeraddr -> Time -> PeerTxLocalState tx -> m (PeerAction, PeerTxLocalState tx) -runNextPeerActionImp policy sharedStateVar countersVar peeraddr now peerState = atomically $ do +runNextPeerActionImp policy sharedStateVar peerInFlightVar countersVar peeraddr + now peerState = atomically $ do sharedState <- readTVar sharedStateVar + peerInFlight <- readTVar peerInFlightVar let sharedGeneration0 = sharedGeneration sharedState - (peerAction, peerState', sharedState') = State.nextPeerAction now policy peeraddr - peerState sharedState + (peerAction, peerState', peerInFlight', sharedState') = + State.nextPeerAction now policy peeraddr peerState peerInFlight sharedState writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' + writePeerInFlightIfChanged peerInFlightVar peerInFlight peerInFlight' updateCountersForAction countersVar peerAction return (peerAction, peerState') -- | Compute the next action for this peer in pipelined mode. --- --- Similar to 'runNextPeerAction' but allows pipelined txid request messages where --- both acknowledgments and requests can be sent together. Used when waiting for --- pipelined protocol replies. 
runNextPeerActionPipelinedImp :: ( MonadSTM m - , Ord peeraddr - , HasRawTxId txid ) + , Ord peeraddr ) => TxDecisionPolicy -> SharedTxStateVar m peeraddr txid + -> PeerTxInFlightVar m -> TxSubmissionCountersVar m -> peeraddr -> Time -> PeerTxLocalState tx -> m (PeerAction, PeerTxLocalState tx) -runNextPeerActionPipelinedImp policy sharedStateVar countersVar peeraddr now peerState = +runNextPeerActionPipelinedImp policy sharedStateVar peerInFlightVar countersVar + peeraddr now peerState = atomically $ do sharedState <- readTVar sharedStateVar + peerInFlight <- readTVar peerInFlightVar let sharedGeneration0 = sharedGeneration sharedState - (peerAction, peerState', sharedState') = State.nextPeerActionPipelined now policy - peeraddr peerState sharedState + (peerAction, peerState', peerInFlight', sharedState') = + State.nextPeerActionPipelined now policy peeraddr peerState + peerInFlight sharedState writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' + writePeerInFlightIfChanged peerInFlightVar peerInFlight peerInFlight' updateCountersForAction countersVar peerAction return (peerAction, peerState') -- | Process a batch of txids received from this peer. --- --- Interns new txids into the shared state, updates the peer's unacknowledged queue, --- handles mempool fast-path for already-known txids, and leaves fresh txids --- claimable so any advertising peer can later claim them. Returns updated --- peer-local state. 
applyReceivedTxIdsImp :: ( MonadSTM m - , Ord peeraddr , HasRawTxId txid ) => TxDecisionPolicy -> STM m (MempoolSnapshot txid tx idx) -> SharedTxStateVar m peeraddr txid + -> PeerTxInFlightVar m -> TxSubmissionCountersVar m - -> peeraddr -> Time -> NumTxIdsToReq -> [(txid, SizeInBytes)] -> PeerTxLocalState tx -> m (PeerTxLocalState tx) -applyReceivedTxIdsImp policy mempoolGetSnapshot sharedStateVar countersVar peeraddr now - txIdsToReq txidsAndSizes peerState = atomically $ do - MempoolSnapshot { mempoolHasTx } <- mempoolGetSnapshot - sharedState <- readTVar sharedStateVar - let sharedGeneration0 = sharedGeneration sharedState - (peerState', sharedState') = State.handleReceivedTxIds mempoolHasTx now policy peeraddr - txIdsToReq txidsAndSizes peerState sharedState - writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' - modifyTVar countersVar (<> mempty { txIdRepliesReceived = 1 - , txIdsReceived = fromIntegral (length txidsAndSizes) }) - return peerState' +applyReceivedTxIdsImp policy mempoolGetSnapshot sharedStateVar peerInFlightVar + countersVar now txIdsToReq txidsAndSizes peerState = + atomically $ do + MempoolSnapshot { mempoolHasTx } <- mempoolGetSnapshot + sharedState <- readTVar sharedStateVar + peerInFlight <- readTVar peerInFlightVar + let sharedGeneration0 = sharedGeneration sharedState + (peerState', peerInFlight', sharedState') = + State.handleReceivedTxIds mempoolHasTx now policy txIdsToReq txidsAndSizes + peerState peerInFlight sharedState + writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' + writePeerInFlightIfChanged peerInFlightVar peerInFlight peerInFlight' + modifyTVar countersVar (<> mempty { txIdRepliesReceived = 1 + , txIdsReceived = fromIntegral (length txidsAndSizes) }) + return peerState' -- | Process a batch of tx bodies received from this peer. 
--- --- Buffers the received bodies in peer-local state, updates shared advertiser tracking, --- and handles omitted bodies by releasing ownership so other advertisers may claim them. --- Returns the combined penalty count for bodies that were already resolved locally or --- missing from the reply, together with the updated peer-local state. applyReceivedTxsImp :: ( MonadSTM m - , Ord peeraddr + , Eq peeraddr , HasRawTxId txid ) => TxDecisionPolicy -> STM m (MempoolSnapshot txid tx idx) -> SharedTxStateVar m peeraddr txid + -> PeerTxInFlightVar m -> TxSubmissionCountersVar m -> peeraddr -> Time -> [(txid, tx)] -> PeerTxLocalState tx -> m (Int, PeerTxLocalState tx) -applyReceivedTxsImp policy mempoolGetSnapshot sharedStateVar countersVar peeraddr now txs - peerState = atomically $ do +applyReceivedTxsImp policy mempoolGetSnapshot sharedStateVar peerInFlightVar + countersVar peeraddr now txs peerState = atomically $ do MempoolSnapshot { mempoolHasTx } <- mempoolGetSnapshot sharedState <- readTVar sharedStateVar + peerInFlight <- readTVar peerInFlightVar let sharedGeneration0 = sharedGeneration sharedState - let (omittedCount, lateCount, peerState', sharedState') = - State.handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState + (omittedCount, lateCount, peerState', peerInFlight', sharedState') = + State.handleReceivedTxs mempoolHasTx now policy peeraddr txs + peerState peerInFlight sharedState writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' + writePeerInFlightIfChanged peerInFlightVar peerInFlight peerInFlight' modifyTVar countersVar (<> mempty { txRepliesReceived = 1, txsReceived = fromIntegral (length txs), @@ -435,16 +475,11 @@ applyReceivedTxsImp policy mempoolGetSnapshot sharedStateVar countersVar peeradd return (omittedCount + lateCount, peerState') -- | Mark txs as submitted to the mempool and update shared state. 
--- --- Takes the keys that were resolved (either already-in-mempool or successfully --- submitted) and the keys that were rejected, updating lease ownership, advertiser --- ack states, and peer rejection scores. --- Returns updated peer-local state. applySubmittedTxsImp :: ( MonadSTM m - , Ord peeraddr - , HasRawTxId txid ) + , Eq peeraddr ) => TxDecisionPolicy -> SharedTxStateVar m peeraddr txid + -> PeerTxInFlightVar m -> TxSubmissionCountersVar m -> peeraddr -> Time @@ -452,22 +487,22 @@ applySubmittedTxsImp :: ( MonadSTM m -> [TxKey] -> PeerTxLocalState tx -> m (PeerTxLocalState tx) -applySubmittedTxsImp policy sharedStateVar countersVar peeraddr now acceptedTxs rejectedTxs - peerState = +applySubmittedTxsImp policy sharedStateVar peerInFlightVar countersVar peeraddr + now acceptedTxs rejectedTxs peerState = atomically $ do sharedState <- readTVar sharedStateVar + peerInFlight <- readTVar peerInFlightVar let sharedGeneration0 = sharedGeneration sharedState - let (peerState', sharedState') = State.handleSubmittedTxs now policy peeraddr acceptedTxs - rejectedTxs peerState sharedState + (peerState', peerInFlight', sharedState') = + State.handleSubmittedTxs now policy peeraddr acceptedTxs + rejectedTxs peerState peerInFlight sharedState writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' + writePeerInFlightIfChanged peerInFlightVar peerInFlight peerInFlight' modifyTVar countersVar (<> mempty { txsAccepted = fromIntegral (length acceptedTxs) , txsRejected = fromIntegral (length rejectedTxs) }) return peerState' -- | Resolve txids and advertised sizes for a batch of tx keys to request. --- --- Looks up the real txid and size from peer-local state for building the --- protocol message. Used before sending 'MsgRequestTxs'. 
resolveTxRequestImp :: ( MonadSTM m , Ord txid ) => SharedTxStateVar m peeraddr txid @@ -486,10 +521,6 @@ resolveTxRequestImp sharedStateVar peerState txKeys = atomically $ do ) -- | Resolve buffered tx bodies into full submission records. --- --- Takes tx keys that have been downloaded and buffered locally, looks up their txids and --- body values from peer-local state, and returns triples ready for mempool submission. --- Used when submitting txs after body collection. resolveBufferedTxsImp :: ( MonadSTM m ) => SharedTxStateVar m peeraddr txid @@ -507,4 +538,3 @@ resolveBufferedTxsImp sharedStateVar peerState txKeys = atomically $ do Just tx -> tx Nothing -> error "TxSubmission.V2.resolveBufferedTxsImp: missing buffered tx" ) - diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 81e7cf5ea0b..474fc7dcc17 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -9,9 +9,6 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.State , markSubmittingTxs , nextPeerAction , nextPeerActionPipelined - , advertisingPeersForTxKeysExcept - , advertisingPeersForTxExcept - , removeAdvertisingPeersForResolvedTx , currentPeerScore , drainPeerScore , applyPeerRejections @@ -21,10 +18,10 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.State import Control.Monad.Class.MonadTime.SI (DiffTime, Time, addTime, diffTime) import Data.Foldable (foldl', toList) import Data.IntMap.Strict qualified as IntMap +import Data.IntSet (IntSet) import Data.IntSet qualified as IntSet import Data.Map.Strict qualified as Map import Data.Sequence.Strict qualified as StrictSeq -import Data.Set qualified as Set import Data.Word (Word64) import Ouroboros.Network.Protocol.TxSubmission2.Type (NumTxIdsToAck, @@ -40,19 +37,19 @@ data TxIdRequestMode = AllowAnyTxIdRequests | 
AllowPipelinedTxIdRequests -- data PeerActionContext peeraddr txid tx = PeerActionContext { -- | Current time used for lease expiry and score decay decisions. - pacNow :: !Time, + pacNow :: !Time, -- | Decision policy that governs request, retry, and scoring limits. - pacPolicy :: !TxDecisionPolicy, + pacPolicy :: !TxDecisionPolicy, -- | Address of the peer whose next action is being chosen. - pacPeerAddr :: !peeraddr, + pacPeerAddr :: !peeraddr, -- | Current peer-local state after local pruning has been applied. - pacPeerState :: !(PeerTxLocalState tx), + pacPeerState :: !(PeerTxLocalState tx), + -- | This peer's contribution counters mirroring shared-state writes. + pacPeerInFlight :: !PeerTxInFlight, -- | Shared tx-submission state after shared pruning has been applied. - pacSharedState :: !(SharedTxState peeraddr txid), - -- | This peer's shared state after pruning. - pacSharedPeerState :: !SharedPeerState, + pacSharedState :: !(SharedTxState peeraddr txid), -- | Score-derived delay this peer must wait after a tx becomes claimable. - pacClaimDelay :: !DiffTime + pacClaimDelay :: !DiffTime } data PeerActionChoice peeraddr = @@ -64,21 +61,21 @@ data PeerActionChoice peeraddr = -- | Build a precomputed context for selecting the next action for a peer. 
-- -- -mkPeerActionContext :: Ord peeraddr - => Time +mkPeerActionContext :: Time -> TxDecisionPolicy -> peeraddr -> PeerTxLocalState tx + -> PeerTxInFlight -> SharedTxState peeraddr txid -> PeerActionContext peeraddr txid tx -mkPeerActionContext now policy peeraddr peerState sharedState = +mkPeerActionContext now policy peeraddr peerState peerInFlight sharedState = PeerActionContext { pacNow = now, pacPolicy = policy, pacPeerAddr = peeraddr, pacPeerState = peerState', + pacPeerInFlight = peerInFlight, pacSharedState = sharedState, - pacSharedPeerState = sharedPeerState', pacClaimDelay = peerClaimDelay policy now (peerScore peerState') } where @@ -91,31 +88,27 @@ mkPeerActionContext now policy peeraddr peerState sharedState = peerDownloadedTxs = IntMap.intersection downloaded (sharedTxTable sharedState) } - sharedPeerState' = - case Map.lookup peeraddr (sharedPeers sharedState) of - Just sharedPeerState -> sharedPeerState - Nothing -> - error "TxSubmission.V2.mkPeerActionContext: missing peer" - -- | Compute the next peer-local action. 
-nextPeerAction :: (Ord peeraddr, HasRawTxId txid) +nextPeerAction :: Ord peeraddr => Time -> TxDecisionPolicy -> peeraddr -> PeerTxLocalState tx + -> PeerTxInFlight -> SharedTxState peeraddr txid - -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) + -> (PeerAction, PeerTxLocalState tx, PeerTxInFlight, SharedTxState peeraddr txid) nextPeerAction = nextPeerActionWithMode AllowAnyTxIdRequests {-# INLINABLE nextPeerAction #-} -- | Pipelined version of nextPeerAction -nextPeerActionPipelined :: (Ord peeraddr, HasRawTxId txid) +nextPeerActionPipelined :: Ord peeraddr => Time -> TxDecisionPolicy -> peeraddr -> PeerTxLocalState tx + -> PeerTxInFlight -> SharedTxState peeraddr txid - -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) + -> (PeerAction, PeerTxLocalState tx, PeerTxInFlight, SharedTxState peeraddr txid) nextPeerActionPipelined = nextPeerActionWithMode AllowPipelinedTxIdRequests {-# INLINABLE nextPeerActionPipelined #-} @@ -124,25 +117,27 @@ nextPeerActionPipelined = nextPeerActionWithMode AllowPipelinedTxIdRequests -- nextPeerActionWithMode handles body requests for txs this peer may currently -- fetch, tx submission for bodies buffered locally by this peer, and txid ack/request -- messages. Updates 'peerPhase' on the returned 'PeerTxLocalState' to --- reflect the chosen action. -nextPeerActionWithMode :: (Ord peeraddr, HasRawTxId txid) +-- reflect the chosen action and threads the per-peer 'PeerTxInFlight' +-- counters. 
+nextPeerActionWithMode :: Ord peeraddr => TxIdRequestMode -> Time -> TxDecisionPolicy -> peeraddr -> PeerTxLocalState tx + -> PeerTxInFlight -> SharedTxState peeraddr txid - -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) -nextPeerActionWithMode txIdRequestMode now policy peeraddr peerState sharedState = - let (action, peerState', sharedState'') = + -> (PeerAction, PeerTxLocalState tx, PeerTxInFlight, SharedTxState peeraddr txid) +nextPeerActionWithMode txIdRequestMode now policy peeraddr peerState peerInFlight sharedState = + let (action, peerState', peerInFlight', sharedState'') = applyPeerActionChoice ctx (pickPeerActionChoice txIdRequestMode ctx) peerState'' = peerState' { peerPhase = phaseForAction txIdRequestMode (peerPhase peerState) action } - in (action, peerState'', sharedState'') + in (action, peerState'', peerInFlight', sharedState'') where - sharedState' = bumpStuckEntries now policy peeraddr peerState sharedState - ctx = mkPeerActionContext now policy peeraddr peerState sharedState' + sharedState' = bumpStuckEntries now policy peerState sharedState + ctx = mkPeerActionContext now policy peeraddr peerState peerInFlight sharedState' -- | Compute the new 'PeerPhase' for the chosen 'PeerAction'. 
-- @@ -181,13 +176,12 @@ pickPeerActionChoice txIdRequestMode ctx in ChooseRequestTxIds flavour acknowledgedTxIds txIdsToAcknowledge txIdsToRequest unacknowledgedTxIds' -- Do nothing | otherwise = - ChooseDoNothing (peerGenerationOf (pacPeerAddr ctx) (pacSharedState ctx)) (nextWakeDelay ctx) + ChooseDoNothing (sharedGeneration (pacSharedState ctx)) (nextWakeDelay ctx) -- | Execute a chosen peer action and compute resulting state updates -applyPeerActionChoice :: (Ord peeraddr, HasRawTxId txid) - => PeerActionContext peeraddr txid tx +applyPeerActionChoice :: PeerActionContext peeraddr txid tx -> PeerActionChoice peeraddr - -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) + -> (PeerAction, PeerTxLocalState tx, PeerTxInFlight, SharedTxState peeraddr txid) applyPeerActionChoice ctx choice = case choice of ChooseSubmit txsToSubmit -> @@ -203,41 +197,55 @@ applyPeerActionChoice ctx choice = -- | Construct a 'PeerSubmitTxs' action for buffered transactions. -- --- Marks the selected txs as 'TxSubmitting' on this peer in the shared state so --- other peers' skip them via 'txSubmittingByOther'. -applySubmitChoice :: Ord peeraddr - => PeerActionContext peeraddr txid tx +-- Marks the selected txs as in-submission on this peer. Other peers +-- skip them via 'txSubmittingByOther'. STM serialisation guarantees +-- only one peer can win the @ChooseSubmit@ race for a given key. 
+applySubmitChoice :: PeerActionContext peeraddr txid tx -> [TxKey] - -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) + -> (PeerAction, PeerTxLocalState tx, PeerTxInFlight, SharedTxState peeraddr txid) applySubmitChoice ctx txsToSubmit = - ( PeerSubmitTxs txsToSubmit - , pacPeerState ctx - , markSubmittingTxs (pacPeerAddr ctx) txsToSubmit (pacSharedState ctx) - ) + let keys = IntSet.fromList (unTxKey <$> txsToSubmit) + pif = pacPeerInFlight ctx + pif' = pif { + pifAttempting = pifAttempting pif `IntSet.difference` keys, + pifSubmitting = pifSubmitting pif `IntSet.union` keys + } + in ( PeerSubmitTxs txsToSubmit + , pacPeerState ctx + , pif' + , markSubmittingTxs txsToSubmit (pacSharedState ctx) + ) -- | Construct a 'PeerRequestTxs' action and update local and shared tx state. applyRequestTxsChoice :: PeerActionContext peeraddr txid tx -> [TxKey] -> SizeInBytes -> IntMap.IntMap (TxEntry peeraddr) - -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) + -> (PeerAction, PeerTxLocalState tx, PeerTxInFlight, SharedTxState peeraddr txid) applyRequestTxsChoice ctx txsToRequest txsToRequestSize txTable = ( PeerRequestTxs txsToRequest , peerState'' + , peerInFlight'' , sharedState'' ) where + requestedKeys = IntSet.fromList (unTxKey <$> txsToRequest) peerState'' = (pacPeerState ctx) { peerRequestedTxs = - foldl' (flip IntSet.insert) (peerRequestedTxs (pacPeerState ctx)) (unTxKey <$> txsToRequest), + peerRequestedTxs (pacPeerState ctx) `IntSet.union` requestedKeys, peerRequestedTxBatches = peerRequestedTxBatches (pacPeerState ctx) StrictSeq.|> RequestedTxBatch { - requestedTxBatchSet = IntSet.fromList (unTxKey <$> txsToRequest), + requestedTxBatchSet = requestedKeys, requestedTxBatchSize = txsToRequestSize }, peerRequestedTxsSize = peerRequestedTxsSize (pacPeerState ctx) + txsToRequestSize } + pif = pacPeerInFlight ctx + peerInFlight'' = pif { + pifLeased = pifLeased pif `IntSet.union` requestedKeys, + pifAttempting = pifAttempting pif 
`IntSet.union` requestedKeys + } -- Take the lease without waking other advertisers: claiming doesn't -- give them a new option (they couldn't claim before this commit, and -- they still can't), and they'll be bumped on submit / lease release. @@ -249,48 +257,52 @@ applyRequestTxsChoice ctx txsToRequest txsToRequestSize txTable = -- | Construct a 'PeerRequestTxIds' action and update local and shared txid state. applyRequestTxIdsChoice - :: (Ord peeraddr, HasRawTxId txid) - => PeerActionContext peeraddr txid tx + :: PeerActionContext peeraddr txid tx -> TxIdsReqFlavour -> [TxKey] -> NumTxIdsToAck -> NumTxIdsToReq -> StrictSeq.StrictSeq TxKey - -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) + -> (PeerAction, PeerTxLocalState tx, PeerTxInFlight, SharedTxState peeraddr txid) applyRequestTxIdsChoice ctx flavour acknowledgedTxIds txIdsToAcknowledge txIdsToRequest unacknowledgedTxIds' = ( PeerRequestTxIds flavour txIdsToAcknowledge txIdsToRequest , peerState'' - , sharedState'' + , peerInFlight'' + , pacSharedState ctx ) where + peerState0 = pacPeerState ctx + acknowledgedKeys = IntSet.fromList (unTxKey <$> acknowledgedTxIds) peerState'' = - (pacPeerState ctx) { + peerState0 { peerAvailableTxIds = - IntMap.withoutKeys (peerAvailableTxIds (pacPeerState ctx)) (IntSet.fromList $ unTxKey <$> acknowledgedTxIds), + IntMap.withoutKeys (peerAvailableTxIds peerState0) acknowledgedKeys, peerUnacknowledgedTxIds = unacknowledgedTxIds', - peerRequestedTxIds = peerRequestedTxIds (pacPeerState ctx) + txIdsToRequest + peerRequestedTxIds = peerRequestedTxIds peerState0 + txIdsToRequest + } + pif = pacPeerInFlight ctx + peerInFlight'' = pif { + pifAdvertised = pifAdvertised pif `IntSet.difference` acknowledgedKeys } - sharedState'' = - acknowledgeTxIds (pacPeerAddr ctx) acknowledgedTxIds (pacSharedState ctx) -- | Construct a 'PeerDoNothing' action. 
applyDoNothingChoice :: PeerActionContext peeraddr txid tx -> Word64 -> Maybe DiffTime - -> (PeerAction, PeerTxLocalState tx, SharedTxState peeraddr txid) + -> (PeerAction, PeerTxLocalState tx, PeerTxInFlight, SharedTxState peeraddr txid) applyDoNothingChoice ctx generation wakeDelay = ( PeerDoNothing generation wakeDelay , pacPeerState ctx + , pacPeerInFlight ctx , pacSharedState ctx ) -- | Select downloaded transactions that this peer may submit to the mempool. pickSubmitAction - :: Ord peeraddr - => PeerActionContext peeraddr txid tx + :: PeerActionContext peeraddr txid tx -> Maybe [TxKey] -pickSubmitAction PeerActionContext { pacPeerAddr, pacPeerState, pacSharedState } = +pickSubmitAction PeerActionContext { pacPeerState, pacPeerInFlight, pacSharedState } = let txsToSubmit = pickBufferedTxsToSubmit in if null txsToSubmit then Nothing @@ -311,9 +323,8 @@ pickSubmitAction PeerActionContext { pacPeerAddr, pacPeerState, pacSharedState } go acc (txKey@(TxKey k) : rest) = case IntMap.lookup k (sharedTxTable pacSharedState) of Just txEntry - | IntMap.member k (peerDownloadedTxs pacPeerState) - , txBufferedByPeer pacPeerAddr txEntry - , not (txSubmittingByOther pacPeerAddr txEntry) -> + | txBufferedByPeer pacPeerState k + , not (txSubmittingByOther pacPeerInFlight k txEntry) -> go (txKey : acc) rest _ | retainedMember k (sharedRetainedTxs pacSharedState) -> -- already resolved via another peer @@ -457,12 +468,12 @@ pickRequestTxIdsAction txIdRequestMode ctx@PeerActionContext { pacPolicy, pacPee -- | Compute the time delay until the peer should next wake to check for work. 
nextWakeDelay :: PeerActionContext peeraddr txid tx -> Maybe DiffTime nextWakeDelay PeerActionContext { pacNow, pacPolicy, pacClaimDelay - , pacSharedPeerState, pacPeerState, pacSharedState } = + , pacPeerState, pacPeerInFlight, pacSharedState } = (`diffTime` pacNow) <$> minMaybe (minMaybe nextClaimWake nextBumpWake) nextRetainWake where -- Wake at the earliest claim-ready time among txs this peer advertises. nextClaimWake = - IntSet.foldl' stepClaim Nothing (sharedPeerAdvertisedTxKeys pacSharedPeerState) + IntSet.foldl' stepClaim Nothing (pifAdvertised pacPeerInFlight) -- Wake at the earliest bump-ready time among txs this peer holds buffered. -- Scoped to 'peerDownloadedTxs' to match 'bumpStuckEntries': only the @@ -492,16 +503,15 @@ nextWakeDelay PeerActionContext { pacNow, pacPolicy, pacClaimDelay minMaybe x Nothing = x minMaybe (Just x) (Just y) = Just (min x y) --- | Assign a tx lease to a peer and mark it as downloading. -claimTx :: Ord peeraddr - => peeraddr +-- | Assign a tx lease to a peer and increment the attempt count. +claimTx :: peeraddr -> Time -> TxEntry peeraddr -> TxEntry peeraddr -claimTx peeraddr leaseUntil txEntry@TxEntry { txAttempts } = +claimTx peeraddr leaseUntil txEntry@TxEntry { txAttempt } = txEntry { - txLease = TxLeased peeraddr leaseUntil, - txAttempts = Map.insert peeraddr TxDownloading txAttempts + txLease = TxLeased peeraddr leaseUntil, + txAttempt = txAttempt + 1 } -- | Time at which a leased entry becomes eligible for an inflight cap bump. @@ -512,19 +522,19 @@ claimTx peeraddr leaseUntil txEntry@TxEntry { txAttempts } = -- its own 'inflightTimeout' grace period before the next bump, so cap growth -- is rate-limited at one bump per 'inflightTimeout' per claim. 
stuckBumpReadyAt :: TxDecisionPolicy -> Time -> Time -stuckBumpReadyAt policy leaseUntil = - addTime (inflightTimeout policy - interTxSpace policy) leaseUntil +stuckBumpReadyAt policy = + addTime (inflightTimeout policy - interTxSpace policy) -- | Bump 'currentMaxInflightMultiplicity' by one when the leaseholder has --- held the lease past 'inflightTimeout' without anyone reaching 'TxSubmitting', +-- held the lease past 'inflightTimeout' without anyone reaching submission, -- and the cap is the bottleneck preventing another peer from joining. bumpCurrentMaxIfStuck :: Time -> TxDecisionPolicy -> TxEntry peeraddr -> TxEntry peeraddr bumpCurrentMaxIfStuck now policy entry@TxEntry { txLease = TxLeased _ leaseUntil , currentMaxInflightMultiplicity = cap } - | txActiveAttemptCount entry >= cap + | txAttempt entry >= cap , now >= stuckBumpReadyAt policy leaseUntil - , not (txSubmittingAnywhere entry) + , not (txInSubmission entry) = entry { currentMaxInflightMultiplicity = cap + 1 } bumpCurrentMaxIfStuck _ _ entry = entry @@ -537,8 +547,8 @@ nextStuckBumpWake :: Time -> TxDecisionPolicy -> TxEntry peeraddr -> Maybe Time nextStuckBumpWake now policy entry@TxEntry { txLease = TxLeased _ leaseUntil , currentMaxInflightMultiplicity = cap } - | txActiveAttemptCount entry >= cap - , not (txSubmittingAnywhere entry) + | txAttempt entry >= cap + , not (txInSubmission entry) , let bumpAt = stuckBumpReadyAt policy leaseUntil , bumpAt >= now = Just bumpAt @@ -550,24 +560,18 @@ nextStuckBumpWake _ _ _ = Nothing -- is small (usually empty) so the sweep is cheap, and any tx it has buffered -- is one it has at some point claimed itself. -- --- When an entry is bumped, this also bumps the per-peer generation of every --- other peer that advertises it so they wake out of 'awaitSharedChange' and --- re-evaluate eligibility under the new cap. 'sharedGeneration' is bumped --- so 'writeSharedStateIfChanged' commits the update. 
-bumpStuckEntries :: Ord peeraddr - => Time +-- 'sharedGeneration' is bumped when entries change so other peers wake out +-- of 'awaitSharedChange' and re-evaluate eligibility under the new cap. +bumpStuckEntries :: Time -> TxDecisionPolicy - -> peeraddr -> PeerTxLocalState tx -> SharedTxState peeraddr txid -> SharedTxState peeraddr txid -bumpStuckEntries now policy peeraddr peerState st = +bumpStuckEntries now policy peerState st = if IntSet.null bumpedKeys then st - else bumpPeerGenerations - (advertisingPeersForTxKeysExcept peeraddr bumpedKeys st) - st { sharedTxTable = txTable', - sharedGeneration = sharedGeneration st + 1 } + else st { sharedTxTable = txTable', + sharedGeneration = sharedGeneration st + 1 } where (bumpedKeys, txTable') = IntMap.foldlWithKey' bumpOne (IntSet.empty, sharedTxTable st) @@ -582,71 +586,52 @@ bumpStuckEntries now policy peeraddr peerState st = then (IntSet.insert k bumpedAcc, IntMap.insert k entry' tbl) else (bumpedAcc, tbl) --- | Number of peers currently attempting this tx body. --- --- Counts every peer in 'txAttempts' regardless of attempt state. Callers --- that have already excluded 'TxSubmitting' via 'txSubmittingAnywhere' --- effectively count only 'TxDownloading' and 'TxBuffered'. -txActiveAttemptCount :: TxEntry peeraddr -> Int -txActiveAttemptCount TxEntry { txAttempts } = Map.size txAttempts - -- | Determine if a tx is eligible for this peer to request. -- -- A tx is selectable if it can be claimed or is already owned by this peer -- and this peer's score-derived claim delay has elapsed. -txSelectable :: Ord peeraddr +-- +-- Callers iterate candidates from the peer's own 'peerAvailableTxIds', so +-- the "this peer advertises @txKey@" precondition is established by the +-- caller. 
+txSelectable :: Eq peeraddr => PeerActionContext peeraddr txid tx -> TxKey -> TxEntry peeraddr -> Bool -txSelectable PeerActionContext { pacNow, pacPeerAddr, pacSharedPeerState - , pacClaimDelay } - txKey +txSelectable PeerActionContext { pacNow, pacPeerAddr, pacClaimDelay + , pacPeerInFlight } + (TxKey k) txEntry - | txSubmittingAnywhere txEntry = False + | txInSubmission txEntry = False | txPeerHasAttempt = False - | txActiveAttemptCount txEntry >= currentMaxInflightMultiplicity txEntry = False - | not peerAdvertisesTx = False + | txAttempt txEntry >= currentMaxInflightMultiplicity txEntry = False | txOwnedByPeer txEntry = True | otherwise = txClaimReadyAt pacClaimDelay txEntry <= pacNow where - peerAdvertisesTx = - IntSet.member (unTxKey txKey) (sharedPeerAdvertisedTxKeys pacSharedPeerState) - -- txOwnedByPeer :: TxEntry peeraddr -> Bool txOwnedByPeer TxEntry { txLease = TxLeased owner _ } = owner == pacPeerAddr txOwnedByPeer TxEntry { txLease = TxClaimable _ } = False - txPeerHasAttempt = Map.member pacPeerAddr (txAttempts txEntry) + txPeerHasAttempt = + IntSet.member k (pifAttempting pacPeerInFlight) + || IntSet.member k (pifSubmitting pacPeerInFlight) - --- | Extract the peer's TxAttemptState for the TX entry, if it exists. -txAttemptOfPeer :: Ord peeraddr => peeraddr -> TxEntry peeraddr -> Maybe TxAttemptState -txAttemptOfPeer peeraddr TxEntry { txAttempts } = Map.lookup peeraddr txAttempts - --- | Does the peer have the TX entry buffered? -txBufferedByPeer :: Ord peeraddr => peeraddr -> TxEntry peeraddr -> Bool -txBufferedByPeer peeraddr txEntry = - txAttemptOfPeer peeraddr txEntry == Just TxBuffered +-- | Does the peer have the TX entry buffered locally? +-- +-- The peer's own 'peerDownloadedTxs' is the source of truth for "buffered +-- body present", so this is a peer-local lookup. 
+txBufferedByPeer :: PeerTxLocalState tx -> Int -> Bool +txBufferedByPeer peerState k = IntMap.member k (peerDownloadedTxs peerState) -- | Check whether some other peer is already submitting this tx. -- --- Uses a single fold over 'txAttempts' and short-circuits at the first --- matching 'TxSubmitting'. -txSubmittingByOther :: Eq peeraddr => peeraddr -> TxEntry peeraddr -> Bool -txSubmittingByOther peeraddr TxEntry { txAttempts } = - Map.foldrWithKey - (\owner attempt acc -> (owner /= peeraddr && attempt == TxSubmitting) || acc) - False - txAttempts - --- | Check whether any peer is currently submitting this tx. --- --- Like 'txSubmittingByOther', this short-circuits at the first 'TxSubmitting'. -txSubmittingAnywhere :: TxEntry peeraddr -> Bool -txSubmittingAnywhere TxEntry { txAttempts } = - Map.foldr (\attempt acc -> attempt == TxSubmitting || acc) False txAttempts - +-- True iff the entry's @txInSubmission@ flag is set and this peer is not +-- the submitter. +txSubmittingByOther :: PeerTxInFlight -> Int -> TxEntry peeraddr -> Bool +txSubmittingByOther pif k txEntry = + txInSubmission txEntry + && IntSet.notMember k (pifSubmitting pif) -- | Compute the current usefulness score for a peer after time-based decay. 
-- @@ -715,115 +700,44 @@ txClaimReadyAt claimDelay TxEntry { txLease } = TxLeased _ leaseUntil -> leaseUntil TxClaimable readyAt -> readyAt -updatePeerAdvertisedTxKeys - :: Ord peeraddr - => peeraddr - -> (IntSet.IntSet -> (a, IntSet.IntSet)) - -> SharedTxState peeraddr txid - -> (a, SharedTxState peeraddr txid) -updatePeerAdvertisedTxKeys peeraddr updateKeys st@SharedTxState { sharedPeers } = - case Map.lookup peeraddr sharedPeers of - Just sharedPeerState -> - let oldKeys = sharedPeerAdvertisedTxKeys sharedPeerState - (result, newKeys) = updateKeys oldKeys - in if newKeys == oldKeys - then (result, st) - else - ( result - , st { - sharedPeers = - Map.insert - peeraddr - (sharedPeerState { sharedPeerAdvertisedTxKeys = newKeys }) - sharedPeers - } - ) - Nothing -> - error "TxSubmission.V2.updatePeerAdvertisedTxKeys: missing peer" - --- | Acknowledge txids from a peer and update shared state. -acknowledgeTxIds :: (Ord peeraddr, HasRawTxId txid) - => peeraddr - -> [TxKey] - -> SharedTxState peeraddr txid - -> SharedTxState peeraddr txid -acknowledgeTxIds _ [] st = st -acknowledgeTxIds peeraddr acknowledgedTxIds st = - if IntSet.null removedKeys - then st - else - let st'' = IntSet.foldl' acknowledgeOne st' removedKeys - in st'' { sharedGeneration = sharedGeneration st + 1 } - where - acknowledgedKeys = IntSet.fromList (unTxKey <$> acknowledgedTxIds) - (removedKeys, st') = - updatePeerAdvertisedTxKeys peeraddr removeAdvertisedKeys st - - removeAdvertisedKeys advertisedKeys = - let removed = IntSet.intersection acknowledgedKeys advertisedKeys - in (removed, advertisedKeys `IntSet.difference` removed) - - acknowledgeOne acc k = - case IntMap.lookup k (sharedTxTable acc) of - Just txEntry -> - let txEntry' = txEntry { txAdvertiserCount = txAdvertiserCount txEntry - 1 } - in if activeTxLive txEntry' - then acc { sharedTxTable = IntMap.insert k txEntry' (sharedTxTable acc) } - else dropTxKey k acc - Nothing -> - acc - -- | Determine if an unacknowledged txid is ready to 
be acknowledged. -- -- A txid remains ackable after it has been resolved and removed from shared -- state. The wire protocol only needs an ack count for the peer-local -- unacknowledged prefix, so a late ack is still safe even after the active -- entry and retained marker have been pruned. -txIdAckable :: Ord peeraddr +txIdAckable :: Eq peeraddr => PeerActionContext peeraddr txid tx -> TxKey -> Bool -txIdAckable PeerActionContext { pacPeerAddr, pacPeerState, pacSharedPeerState, pacSharedState } +txIdAckable PeerActionContext { pacPeerAddr, pacPeerState, pacPeerInFlight, pacSharedState } (TxKey k) | retainedMember k (sharedRetainedTxs pacSharedState) = True + -- The peer no longer tracks the txid as advertised. This covers + -- mempool/retained txids that 'handleReceivedTxIds' kept out of the + -- advertised set, as well as txids the peer has already attempted + -- and submitted (or that another peer resolved). + | not (IntSet.member k (pifAdvertised pacPeerInFlight)) = True | otherwise = case IntMap.lookup k (sharedTxTable pacSharedState) of - Just txEntry@TxEntry { txLease, txAttempts } -> - not (IntSet.member k (sharedPeerAdvertisedTxKeys pacSharedPeerState)) - || - let ackWhenBuffered = - case txLease of - TxLeased owner _ -> owner == pacPeerAddr || Map.member pacPeerAddr txAttempts - TxClaimable _ -> Map.member pacPeerAddr txAttempts - in - -- Ack the txid if we downloaded it and no other - -- peer is in the process of submitting it to the - -- mempool. 
- ackWhenBuffered - && IntMap.member k (peerDownloadedTxs pacPeerState) - && not (txBufferedByPeer pacPeerAddr txEntry - && txSubmittingByOther pacPeerAddr txEntry) + Just txEntry -> + let peerHasStake = + IntSet.member k (pifAttempting pacPeerInFlight) + || IntSet.member k (pifSubmitting pacPeerInFlight) + ackWhenBuffered = + case txLease txEntry of + TxLeased owner _ -> owner == pacPeerAddr || peerHasStake + TxClaimable _ -> peerHasStake + in + -- Ack the txid if we downloaded it and no other + -- peer is in the process of submitting it to the + -- mempool. + ackWhenBuffered + && IntMap.member k (peerDownloadedTxs pacPeerState) + && not (txBufferedByPeer pacPeerState k + && txSubmittingByOther pacPeerInFlight k txEntry) Nothing -> True -- Safe late ack after the resolved tx was pruned from shared state. --- | Remove one transaction entry from all shared state maps by key. -dropTxKey :: HasRawTxId txid - => Int - -> SharedTxState peeraddr txid - -> SharedTxState peeraddr txid -dropTxKey k st@SharedTxState { sharedTxTable, sharedRetainedTxs, sharedTxIdToKey - , sharedKeyToTxId } = - st { - sharedTxTable = IntMap.delete k sharedTxTable, - sharedRetainedTxs = retainedDeleteKeys (IntSet.singleton k) sharedRetainedTxs, - sharedTxIdToKey = deleteTxId sharedTxIdToKey, - sharedKeyToTxId = IntMap.delete k sharedKeyToTxId - } - where - deleteTxId txIdToKey = - case IntMap.lookup k sharedKeyToTxId of - Just txid -> Map.delete (getRawTxId txid) txIdToKey - Nothing -> txIdToKey - -- | Remove transaction entries from all shared state maps by key. dropTxKeys :: HasRawTxId txid => IntSet.IntSet @@ -845,188 +759,70 @@ dropTxKeys keys st@SharedTxState { sharedTxTable, sharedRetainedTxs, sharedTxIdT Just txid -> Map.delete (getRawTxId txid) txIdToKey Nothing -> txIdToKey --- | Remove transaction keys that are no longer active from the shared state. 
-dropDeadActiveKeys :: HasRawTxId txid - => IntSet.IntSet - -> SharedTxState peeraddr txid - -> SharedTxState peeraddr txid -dropDeadActiveKeys keys st@SharedTxState { sharedTxTable } = - let deadKeys = IntSet.filter isDead keys in - dropTxKeys deadKeys st - where - isDead k = - case IntMap.lookup k sharedTxTable of - Just txEntry -> not (activeTxLive txEntry) - Nothing -> False - -- | Shared-state cleanup -- -- Drops two kinds of dead entries in one pass: -- -- * Retained entries whose retention deadline has passed. --- * Orphaned 'sharedTxTable' entries. +-- * Orphaned 'sharedTxTable' entries: entries with a released lease, +-- no in-flight attempt, and no live peer still tracking the key in +-- its 'pifAdvertised' set. +-- +-- The @liveAdvertised@ set is the union of every active peer's +-- 'pifAdvertised'. Caller (the sweep thread) snapshots all per-peer +-- TVars in the same STM transaction that runs this function so the +-- snapshot is coherent with the sharedTxTable read. -- -- Bumps 'sharedGeneration' if anything changed so sleeping peer workers wake -- and re-evaluate. 
sweepSharedState :: HasRawTxId txid => Time + -> IntSet -> SharedTxState peeraddr txid -> SharedTxState peeraddr txid -sweepSharedState now st +sweepSharedState now liveAdvertised st | IntSet.null toDrop = st | otherwise = (dropTxKeys toDrop st) { sharedGeneration = sharedGeneration st + 1 } where expiredRetained = retainedExpiredKeys now (sharedRetainedTxs st) - orphans = IntMap.keysSet (IntMap.filter isOrphan (sharedTxTable st)) + orphans = + IntMap.keysSet + (IntMap.filterWithKey isOrphan (sharedTxTable st)) toDrop = expiredRetained `IntSet.union` orphans - isOrphan TxEntry { txLease = TxLeased {} } = False - isOrphan TxEntry { txAdvertiserCount, txAttempts } = - txAdvertiserCount == 0 && Map.null txAttempts + isOrphan _ TxEntry { txLease = TxLeased {} } = False + isOrphan k TxEntry { txAttempt, txInSubmission } + | txAttempt > 0 = False + | txInSubmission = False + | IntSet.member k liveAdvertised = False + | otherwise = True {-# INLINABLE sweepSharedState #-} --- | Is the TX entry alive? --- --- A TX entry is alive if there is a lease, there are advertisers for it or there are --- download attempts for it. 
-activeTxLive :: TxEntry peeraddr -> Bool -activeTxLive TxEntry { txLease, txAdvertiserCount, txAttempts } = - leaseLive txLease - || txAdvertiserCount > 0 - || not (Map.null txAttempts) - where - leaseLive TxClaimable {} = False - leaseLive TxLeased {} = True - - -peerAdvertisesTxKey :: Int -> SharedPeerState -> Bool -peerAdvertisesTxKey k SharedPeerState { sharedPeerAdvertisedTxKeys } = - IntSet.member k sharedPeerAdvertisedTxKeys - -peerAdvertisesAnyTxKey - :: IntSet.IntSet - -> SharedPeerState - -> Bool -peerAdvertisesAnyTxKey targetKeys SharedPeerState { sharedPeerAdvertisedTxKeys } = - not (IntSet.disjoint targetKeys sharedPeerAdvertisedTxKeys) - -advertisingPeersForTxKeysExcept - :: Ord peeraddr - => peeraddr - -> IntSet.IntSet - -> SharedTxState peeraddr txid - -> Set.Set peeraddr -advertisingPeersForTxKeysExcept _ targetKeys _ - | IntSet.null targetKeys = Set.empty -advertisingPeersForTxKeysExcept currentPeer targetKeys SharedTxState { sharedPeers } = - Map.foldlWithKey' collect Set.empty sharedPeers - where - collect acc peeraddr sharedPeerState - | peeraddr == currentPeer = acc - | peerAdvertisesAnyTxKey targetKeys sharedPeerState = Set.insert peeraddr acc - | otherwise = acc - -advertisingPeersForTx - :: Ord peeraddr - => TxKey - -> Map.Map peeraddr SharedPeerState - -> Set.Set peeraddr -advertisingPeersForTx (TxKey k) = - Map.foldlWithKey' collect Set.empty - where - collect acc peeraddr sharedPeerState - | peerAdvertisesTxKey k sharedPeerState = Set.insert peeraddr acc - | otherwise = acc - -advertisingPeersForTxExcept - :: Ord peeraddr - => peeraddr - -> TxKey - -> SharedTxState peeraddr txid - -> Set.Set peeraddr -advertisingPeersForTxExcept currentPeer txKey = - advertisingPeersForTxKeysExcept currentPeer (IntSet.singleton (unTxKey txKey)) - -removeAdvertisingPeersForResolvedTx - :: Ord peeraddr - => TxKey - -> SharedTxState peeraddr txid - -> (SharedTxState peeraddr txid, Set.Set peeraddr) -removeAdvertisingPeersForResolvedTx txKey@(TxKey k) 
st@SharedTxState { sharedPeers } - | Set.null advertisers = (st, advertisers) - | otherwise = - ( st { - sharedPeers = - Set.foldl' clearAdvertisedKey sharedPeers advertisers - } - , advertisers - ) - where - advertisers = advertisingPeersForTx txKey sharedPeers - - clearAdvertisedKey peers peeraddr = - Map.adjust - (\sharedPeerState -> - sharedPeerState { - sharedPeerAdvertisedTxKeys = - IntSet.delete k (sharedPeerAdvertisedTxKeys sharedPeerState) - }) - peeraddr - peers - -removeAdvertisingPeersForResolvedTxExcept - :: Ord peeraddr - => peeraddr - -> TxKey - -> SharedTxState peeraddr txid - -> (SharedTxState peeraddr txid, Set.Set peeraddr) -removeAdvertisingPeersForResolvedTxExcept currentPeer txKey@(TxKey k) st@SharedTxState { sharedPeers } - | Set.null advertisers = (st, advertisers) - | otherwise = - ( st { - sharedPeers = - Set.foldl' clearAdvertisedKey sharedPeers advertisers - } - , advertisers - ) - where - advertisers = advertisingPeersForTxExcept currentPeer txKey st - - clearAdvertisedKey peers peeraddr = - Map.adjust - (\sharedPeerState -> - sharedPeerState { - sharedPeerAdvertisedTxKeys = - IntSet.delete k (sharedPeerAdvertisedTxKeys sharedPeerState) - }) - peeraddr - peers - - -- | Handle a batch of tx bodies received from one peer. -- -- Received bodies are buffered locally in the peer state. Bodies that are -- already retained or already in the mempool are counted as late and dropped. -- Any requested tx omitted from the reply releases this peer's ownership. -- Late TXs contributes to the returned penalty count. 
-handleReceivedTxs :: (Ord peeraddr, HasRawTxId txid) +handleReceivedTxs :: (Eq peeraddr, HasRawTxId txid) => (txid -> Bool) -> Time -> TxDecisionPolicy -> peeraddr -> [(txid, tx)] -> PeerTxLocalState tx + -> PeerTxInFlight -> SharedTxState peeraddr txid - -> (Int, Int, PeerTxLocalState tx, SharedTxState peeraddr txid) -handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState = - (omittedCount, lateCount, peerState', sharedState') + -> (Int, Int, PeerTxLocalState tx, PeerTxInFlight, SharedTxState peeraddr txid) +handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState peerInFlight sharedState = + (omittedCount, lateCount, peerState', peerInFlight', sharedState') where txidToKey = sharedTxIdToKey sharedState requestedKeys = requestedTxBatchSet requestedBatch - retainUntil = addTime (bufferedTxsMinLifetime policy) now + retainUntil = addTime (bufferedTxsMinLifetime policy) now -- Dequeue the next requested tx batch to process. (requestedBatch, remainingRequestedBatches) = @@ -1036,72 +832,69 @@ handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState = batch StrictSeq.:<| batches -> (batch, batches) - -- Process each received tx, collecting late counts, pending requests, - -- updated shared state, and peers to wake up. + -- Process each received tx, collecting late counts, pending + -- requests, the keys still buffered by this peer, and updated + -- shared state. @bufferedKeys@ is the subset of @requestedKeys@ + -- that ended up buffered locally; the rest are released below. 
( lateCount , pendingRequestedKeys + , bufferedKeys , sharedStateHandled - , receivedWakePeers , peerDownloadedTxs' ) = foldl' handleOne ( 0 , requestedKeys + , IntSet.empty , sharedState - , Set.empty , peerDownloadedTxs peerState ) txs - (omittedAdvertisedKeys, sharedStateReleased0) = - updatePeerAdvertisedTxKeys peeraddr removeOmittedAdvertisedKeys sharedStateHandled - -- Process omitted (not received) txs: count a penalty for every omitted - -- request, release ownership for keys that are still live, and collect - -- peers to wake up. - (omittedCount, sharedStateReleased, omittedWakePeers) = - IntSet.foldl' handleOmitted (0, sharedStateReleased0, Set.empty) pendingRequestedKeys + -- request and release this peer's lease where it still held one. + (omittedCount, sharedStateReleased) = + IntSet.foldl' handleOmitted (0, sharedStateHandled) pendingRequestedKeys - -- Build the final shared state with updated tables and cleaned-up dead entries. - sharedState'' = - dropDeadActiveKeys pendingRequestedKeys $ - sharedStateReleased { - sharedGeneration = sharedGeneration sharedState + 1 - } + -- Keys this peer is no longer attempting (everything in the batch + -- except still-buffered keys). Used to update both the per-peer + -- in-flight set and the shared 'txAttempt' counters via 'releaseLease'. + releasedKeys = requestedKeys `IntSet.difference` bufferedKeys - -- Update peer state: remove processed keys, update batch tracking, and record - -- downloaded txs. + sharedState' = + sharedStateReleased { + sharedGeneration = sharedGeneration sharedState + 1 + } + + -- Update peer state: remove processed keys, update batch tracking, + -- and record downloaded txs. 
peerState' = peerState { peerAvailableTxIds = - IntSet.foldl' (flip IntMap.delete) (peerAvailableTxIds peerState) requestedKeys + IntMap.withoutKeys (peerAvailableTxIds peerState) requestedKeys , peerRequestedTxs = peerRequestedTxs peerState `IntSet.difference` requestedKeys , peerRequestedTxBatches = remainingRequestedBatches , peerRequestedTxsSize = peerRequestedTxsSize peerState - requestedTxBatchSize requestedBatch , peerDownloadedTxs = peerDownloadedTxs' } - -- Flag peers that may now have work available after processing txs. - sharedState' = - bumpPeerGenerations - (Set.union receivedWakePeers omittedWakePeers) - sharedState'' + peerInFlight' = peerInFlight { + pifLeased = pifLeased peerInFlight `IntSet.difference` releasedKeys, + pifAttempting = pifAttempting peerInFlight `IntSet.difference` releasedKeys, + pifAdvertised = pifAdvertised peerInFlight `IntSet.difference` releasedKeys + } keyWasLive k = IntMap.member k (sharedTxTable sharedState) || retainedMember k (sharedRetainedTxs sharedState) - removeOmittedAdvertisedKeys advertisedKeys = - let removed = IntSet.intersection pendingRequestedKeys advertisedKeys - in (removed, advertisedKeys `IntSet.difference` removed) - -- Fold function over received txs: classify as late, already in mempool, or buffer for -- download. 
handleOne ( lateCountAcc , pendingKeysAcc + , bufferedAcc , sharedAcc - , wakePeersAcc , downloadedAcc ) (txid, tx) = @@ -1109,107 +902,86 @@ handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState = Nothing -> ( lateCountAcc + 1 , pendingKeysAcc + , bufferedAcc , sharedAcc - , wakePeersAcc , downloadedAcc ) - Just txKey@(TxKey k) + Just (TxKey k) | retainedMember k (sharedRetainedTxs sharedAcc) -> let sharedAcc' = sharedAcc { - sharedTxTable = IntMap.delete k (sharedTxTable sharedAcc) + sharedTxTable = + IntMap.adjust decAttempt k (sharedTxTable sharedAcc) } in ( lateCountAcc + 1 , IntSet.delete k pendingKeysAcc + , bufferedAcc , sharedAcc' - , wakePeersAcc , downloadedAcc ) | mempoolHasTx txid -> - let (sharedAcc', advertisers) = - case IntMap.lookup k (sharedTxTable sharedAcc) of - Just _ -> - removeAdvertisingPeersForResolvedTx txKey sharedAcc - Nothing -> - (sharedAcc, Set.empty) - wakePeers = - Set.union wakePeersAcc (Set.delete peeraddr advertisers) - sharedAcc'' = - sharedAcc' { - sharedTxTable = IntMap.delete k (sharedTxTable sharedAcc'), + let sharedAcc' = + sharedAcc { + sharedTxTable = IntMap.delete k (sharedTxTable sharedAcc), sharedRetainedTxs = - retainedInsertMax k retainUntil (sharedRetainedTxs sharedAcc') + retainedInsertMax k retainUntil (sharedRetainedTxs sharedAcc) } in ( lateCountAcc + 1 , IntSet.delete k pendingKeysAcc - , sharedAcc'' - , wakePeers + , bufferedAcc + , sharedAcc' , downloadedAcc ) | otherwise -> case IntMap.lookup k (sharedTxTable sharedAcc) of - Just txEntry - | peerHasAttempt txEntry -> + Just _txEntry + | IntSet.member k (pifAttempting peerInFlight) -> ( lateCountAcc , IntSet.delete k pendingKeysAcc - , sharedAcc { - sharedTxTable = - IntMap.insert k (markBuffered txEntry) - (sharedTxTable sharedAcc) - } - , wakePeersAcc + , IntSet.insert k bufferedAcc + , sharedAcc , IntMap.insert k tx downloadedAcc ) _ -> ( lateCountAcc + 1 , IntSet.delete k pendingKeysAcc + , bufferedAcc , sharedAcc - , wakePeersAcc 
, downloadedAcc ) - -- Handle omitted (not received) txs: release ownership, count penalties, - -- and wake up other advertisers if the tx is still active. - handleOmitted (omittedCountAcc, sharedAcc, wakePeersAcc) k + -- Handle omitted (not received) txs: release this peer's lease so + -- another advertiser can claim the tx, decrement 'txAttempt'. + handleOmitted (omittedCountAcc, sharedAcc) k | keyWasLive k = let sharedAcc' = case IntMap.lookup k (sharedTxTable sharedAcc) of Just txEntry -> sharedAcc { sharedTxTable = - IntMap.insert k (releaseLease (IntSet.member k omittedAdvertisedKeys) txEntry) + IntMap.insert k (releaseLease txEntry) (sharedTxTable sharedAcc) } Nothing -> sharedAcc - wakePeersAcc' = - case IntMap.lookup k (sharedTxTable sharedAcc') of - Just txEntry - | activeTxLive txEntry -> - Set.union - (advertisingPeersForTxExcept peeraddr (TxKey k) sharedAcc') - wakePeersAcc - _ -> wakePeersAcc in - (omittedCountAcc + 1, sharedAcc', wakePeersAcc') + in (omittedCountAcc + 1, sharedAcc') | otherwise = - (omittedCountAcc + 1, sharedAcc, wakePeersAcc) - - peerHasAttempt TxEntry { txAttempts } = - Map.member peeraddr txAttempts + (omittedCountAcc + 1, sharedAcc) - markBuffered txEntry@TxEntry { txAttempts } = - txEntry { txAttempts = Map.insert peeraddr TxBuffered txAttempts } + -- Decrement the entry's attempt counter (e.g. when its body arrived + -- but the tx had been retained meanwhile, so the peer's attempt is + -- effectively over). Lease unaffected: if this peer didn't hold it, + -- nothing to do; if it did, the decrement still leaves @txAttempt@ + -- non-negative because every increment had a paired peer. 
+ decAttempt entry@TxEntry { txAttempt } = + entry { txAttempt = max 0 (txAttempt - 1) } - releaseLease wasAdvertised txEntry@TxEntry { txLease, txAdvertiserCount, txAttempts } = + releaseLease txEntry@TxEntry { txLease, txAttempt } = txEntry { txLease = case txLease of TxLeased owner _ | owner == peeraddr -> TxClaimable now _ -> txLease, - txAdvertiserCount = - if wasAdvertised - then txAdvertiserCount - 1 - else txAdvertiserCount, - txAttempts = Map.delete peeraddr txAttempts + txAttempt = max 0 (txAttempt - 1) } {-# INLINABLE handleReceivedTxs #-} @@ -1218,19 +990,20 @@ handleReceivedTxs mempoolHasTx now policy peeraddr txs peerState sharedState = -- -- Accepted txs leave the active table and move into the retained set so later -- txid advertisements can be acknowledged without re-requesting the body. --- Txs rejected by the mempool release this peer's attempt state and advertiser --- slot so another advertiser may try later. -handleSubmittedTxs :: (Ord peeraddr, HasRawTxId txid) +-- Txs rejected by the mempool release this peer's lease and clear +-- 'txInSubmission' so another advertiser may try later. 
+handleSubmittedTxs :: Eq peeraddr => Time -> TxDecisionPolicy -> peeraddr -> [TxKey] -> [TxKey] -> PeerTxLocalState tx + -> PeerTxInFlight -> SharedTxState peeraddr txid - -> (PeerTxLocalState tx, SharedTxState peeraddr txid) -handleSubmittedTxs now policy peeraddr acceptedTxs rejectedTxs peerState sharedState = - (peerState', sharedState') + -> (PeerTxLocalState tx, PeerTxInFlight, SharedTxState peeraddr txid) +handleSubmittedTxs now policy peeraddr acceptedTxs rejectedTxs peerState peerInFlight sharedState = + (peerState', peerInFlight', sharedState') where acceptedKeys = IntSet.fromList (unTxKey <$> acceptedTxs) rejectedKeys = IntSet.fromList (unTxKey <$> rejectedTxs) @@ -1238,110 +1011,85 @@ handleSubmittedTxs now policy peeraddr acceptedTxs rejectedTxs peerState sharedS peerState' = peerState { peerDownloadedTxs = - IntSet.foldl' (flip IntMap.delete) (peerDownloadedTxs peerState) submittedKeys + IntMap.withoutKeys (peerDownloadedTxs peerState) submittedKeys, + peerAvailableTxIds = + IntMap.withoutKeys (peerAvailableTxIds peerState) submittedKeys } - (sharedStateAfterAccepted, acceptedAdvertisers) = - foldl' acceptSubmittedTx (sharedState, Set.empty) acceptedTxs + -- Submission outcomes clear the peer's submission and advertised + -- contributions for these keys; the peer is done with them. The + -- attempt was already taken off 'pifAttempting' (and 'txAttempt') + -- when 'markSubmittingTxs' fired. 
+ peerInFlight' = peerInFlight { + pifLeased = pifLeased peerInFlight `IntSet.difference` submittedKeys, + pifSubmitting = pifSubmitting peerInFlight `IntSet.difference` submittedKeys, + pifAdvertised = pifAdvertised peerInFlight `IntSet.difference` submittedKeys + } - (rejectedAdvertisedKeys, sharedStateAfterRejectedPeer) = - updatePeerAdvertisedTxKeys peeraddr removeRejectedAdvertisedKeys sharedStateAfterAccepted + sharedStateAfterAccepted = + foldl' acceptSubmittedTx sharedState acceptedTxs - (sharedStateAfterRejected, rejectedWakePeers) = - IntSet.foldl' updateRejected (sharedStateAfterRejectedPeer, Set.empty) rejectedKeys + sharedStateAfterRejected = + IntSet.foldl' updateRejected sharedStateAfterAccepted rejectedKeys sharedState' = - bumpPeerGenerations - (Set.union acceptedAdvertisers rejectedWakePeers) - sharedState'' - - sharedState'' = - dropDeadActiveKeys rejectedKeys $ - sharedStateAfterRejected { - sharedGeneration = sharedGeneration sharedState + 1 - } + sharedStateAfterRejected { + sharedGeneration = sharedGeneration sharedState + 1 + } retainedUntil = addTime (bufferedTxsMinLifetime policy) now - removeRejectedAdvertisedKeys advertisedKeys = - let removed = IntSet.intersection rejectedKeys advertisedKeys - in (removed, advertisedKeys `IntSet.difference` removed) - - acceptSubmittedTx (sharedAcc, wakePeersAcc) txKey@(TxKey k) = - let (sharedAcc', advertisers) = - case IntMap.lookup k (sharedTxTable sharedAcc) of - Just _ -> - removeAdvertisingPeersForResolvedTx txKey sharedAcc - Nothing -> - (sharedAcc, Set.empty) - sharedAcc'' = - sharedAcc' { - sharedTxTable = IntMap.delete k (sharedTxTable sharedAcc'), - sharedRetainedTxs = - retainedInsertMax k retainedUntil (sharedRetainedTxs sharedAcc') - } - in (sharedAcc'', Set.union wakePeersAcc (Set.delete peeraddr advertisers)) - - updateRejected (sharedAcc, wakePeersAcc) k = - let sharedAcc' = - case IntMap.lookup k (sharedTxTable sharedAcc) of - Just txEntry -> - sharedAcc { - sharedTxTable = - 
IntMap.insert k - (markRejected (IntSet.member k rejectedAdvertisedKeys) txEntry) - (sharedTxTable sharedAcc) - } - Nothing -> - sharedAcc - wakePeersAcc' = - case IntMap.lookup k (sharedTxTable sharedAcc') of - Just txEntry - | activeTxLive txEntry -> - Set.union - (advertisingPeersForTxExcept peeraddr (TxKey k) sharedAcc') - wakePeersAcc - _ -> - wakePeersAcc - in (sharedAcc', wakePeersAcc') - - markRejected wasAdvertised txEntry@TxEntry { txLease, txAdvertiserCount, txAttempts } = + acceptSubmittedTx sharedAcc (TxKey k) = + sharedAcc { + sharedTxTable = IntMap.delete k (sharedTxTable sharedAcc), + sharedRetainedTxs = + retainedInsertMax k retainedUntil (sharedRetainedTxs sharedAcc) + } + + updateRejected sharedAcc k = + case IntMap.lookup k (sharedTxTable sharedAcc) of + Just txEntry -> + sharedAcc { + sharedTxTable = + IntMap.insert k (markRejected txEntry) (sharedTxTable sharedAcc) + } + Nothing -> + sharedAcc + + markRejected txEntry@TxEntry { txLease } = txEntry { txLease = case txLease of TxLeased owner _ | owner == peeraddr -> TxClaimable now _ -> txLease, - txAdvertiserCount = - if wasAdvertised - then txAdvertiserCount - 1 - else txAdvertiserCount, - txAttempts = Map.delete peeraddr txAttempts + txInSubmission = False } {-# INLINABLE handleSubmittedTxs #-} -- | Mark buffered txs as entering mempool submission. -markSubmittingTxs :: Ord peeraddr - => peeraddr - -> [TxKey] +-- +-- Decrements 'txAttempt' (the peer is leaving the @attempting@ state) +-- and sets 'txInSubmission'. STM serialisation around the +-- @ChooseSubmit@ choice guarantees only one peer ever flips +-- 'txInSubmission' from @False@ to @True@ for a given key. 
+markSubmittingTxs :: [TxKey] -> SharedTxState peeraddr txid -> SharedTxState peeraddr txid -markSubmittingTxs _ [] st = st -markSubmittingTxs peeraddr txKeys st = +markSubmittingTxs [] st = st +markSubmittingTxs txKeys st = st { - sharedTxTable = foldl' markOne (sharedTxTable st) txKeys, + sharedTxTable = foldl' markOne (sharedTxTable st) txKeys, sharedGeneration = sharedGeneration st + 1 } where markOne txTable (TxKey k) = IntMap.adjust markSubmitting k txTable - markSubmitting txEntry@TxEntry { txAttempts } = + markSubmitting txEntry@TxEntry { txAttempt } = txEntry { - txAttempts = Map.adjust toSubmitting peeraddr txAttempts + txAttempt = max 0 (txAttempt - 1), + txInSubmission = True } - toSubmitting TxBuffered = TxSubmitting - toSubmitting attempt = attempt - -- | Handle a batch of txids received from one peer. -- @@ -1350,35 +1098,27 @@ markSubmittingTxs peeraddr txKeys st = -- advertises the txid may claim it once its score-derived delay has elapsed, -- which avoids pinning fresh work to the first peer that happened to announce -- it. -handleReceivedTxIds :: forall peeraddr txid tx. (Ord peeraddr, HasRawTxId txid) +handleReceivedTxIds :: forall peeraddr txid tx. 
HasRawTxId txid => (txid -> Bool) -> Time -> TxDecisionPolicy - -> peeraddr -> NumTxIdsToReq -> [(txid, SizeInBytes)] -> PeerTxLocalState tx + -> PeerTxInFlight -> SharedTxState peeraddr txid - -> (PeerTxLocalState tx, SharedTxState peeraddr txid) -handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSizes - peerState sharedState = - (peerState'', sharedState'') + -> (PeerTxLocalState tx, PeerTxInFlight, SharedTxState peeraddr txid) +handleReceivedTxIds mempoolHasTx now policy requestedTxIds txidsAndSizes + peerState peerInFlight sharedState = + (peerState'', peerInFlight'', sharedState'') where - sharedPeerState0 = - case Map.lookup peeraddr (sharedPeers sharedState) of - Just sharedPeerState -> sharedPeerState - Nothing -> - error "TxSubmission.V2.handleReceivedTxIds: missing peer" - - peerAdvertisedKeys0 = sharedPeerAdvertisedTxKeys sharedPeerState0 + peerAdvertisedKeys0 = pifAdvertised peerInFlight - -- Fold over received txids: build unacknowledged list, update tables, - -- and track peers to wake based on tx state (retained/mempool/new). + -- Fold over received txids: build unacknowledged list, update tables. 
( receivedTxKeysRev , peerAvailableTxIds' , sharedStateHandled , peerAdvertisedKeys' - , peersToWake , sharedChanged ) = foldl' @@ -1387,7 +1127,6 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , peerAvailableTxIds peerState , sharedState , peerAdvertisedKeys0 - , Set.empty , False ) txidsAndSizes @@ -1403,20 +1142,15 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize peerAvailableTxIds = peerAvailableTxIds' } + peerInFlight'' = peerInFlight { + pifAdvertised = peerAdvertisedKeys' + } + sharedState'' - | sharedChanged || peerAdvertisedKeys' /= peerAdvertisedKeys0 = - bumpPeerGenerations peersToWake $ - sharedStateHandled { - sharedPeers = - if peerAdvertisedKeys' == peerAdvertisedKeys0 - then sharedPeers sharedStateHandled - else - Map.insert - peeraddr - (sharedPeerState0 { sharedPeerAdvertisedTxKeys = peerAdvertisedKeys' }) - (sharedPeers sharedStateHandled), - sharedGeneration = sharedGeneration sharedState + 1 - } + | sharedChanged = + sharedStateHandled { + sharedGeneration = sharedGeneration sharedState + 1 + } | otherwise = sharedState @@ -1428,7 +1162,6 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , IntMap.IntMap SizeInBytes , SharedTxState peeraddr txid , IntSet.IntSet - , Set.Set peeraddr , Bool ) -> (txid, SizeInBytes) @@ -1436,7 +1169,6 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , IntMap.IntMap SizeInBytes , SharedTxState peeraddr txid , IntSet.IntSet - , Set.Set peeraddr , Bool ) step @@ -1444,7 +1176,6 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , !availableAcc , !sharedAcc , !peerAdvertisedKeysAcc - , !peersAcc , !sharedChangedAcc ) (txid, txSize) @@ -1453,36 +1184,26 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize , IntMap.delete k availableAcc , sharedAcc' , IntSet.delete k peerAdvertisedKeysAcc - , peersAcc , 
sharedChangedAcc' ) | mempoolHasTx txid = - let (sharedAcc'', advertisers) = - case IntMap.lookup k (sharedTxTable sharedAcc') of - Just _ -> - removeAdvertisingPeersForResolvedTxExcept peeraddr txKey sharedAcc' - Nothing -> - (sharedAcc', Set.empty) - wakePeers = - Set.union peersAcc (Set.delete peeraddr advertisers) - in ( txKey : unacknowledgedAcc - , IntMap.delete k availableAcc - , sharedAcc'' { - sharedTxTable = IntMap.delete k (sharedTxTable sharedAcc''), - sharedRetainedTxs = - retainedInsertMax k retainUntil (sharedRetainedTxs sharedAcc'') - } - , IntSet.delete k peerAdvertisedKeysAcc - , wakePeers - , True - ) + ( txKey : unacknowledgedAcc + , IntMap.delete k availableAcc + , sharedAcc' { + sharedTxTable = IntMap.delete k (sharedTxTable sharedAcc'), + sharedRetainedTxs = + retainedInsertMax k retainUntil (sharedRetainedTxs sharedAcc') + } + , IntSet.delete k peerAdvertisedKeysAcc + , True + ) | otherwise = case IntMap.lookup k (sharedTxTable sharedAcc') of Nothing -> let txEntry = TxEntry { txLease = TxClaimable now, - txAdvertiserCount = 1, - txAttempts = Map.empty, + txAttempt = 0, + txInSubmission = False, currentMaxInflightMultiplicity = txInflightMultiplicity policy } @@ -1492,43 +1213,20 @@ handleReceivedTxIds mempoolHasTx now policy peeraddr requestedTxIds txidsAndSize sharedTxTable = IntMap.insert k txEntry (sharedTxTable sharedAcc') } , IntSet.insert k peerAdvertisedKeysAcc - , peersAcc , True ) - Just txEntry -> - let (entryChanged, txEntry', peerAdvertisedKeysAcc') = - addAdvertiser k peerAdvertisedKeysAcc txEntry - availableAcc' = IntMap.insert k txSize availableAcc - sharedAcc'' = - if entryChanged - then - sharedAcc' { - sharedTxTable = IntMap.insert k txEntry' (sharedTxTable sharedAcc') - } - else - sharedAcc' - in ( txKey : unacknowledgedAcc - , availableAcc' - , sharedAcc'' - , peerAdvertisedKeysAcc' - , peersAcc - , sharedChangedAcc' || entryChanged - ) + Just _ -> + ( txKey : unacknowledgedAcc + , IntMap.insert k txSize availableAcc + , 
sharedAcc' + , IntSet.insert k peerAdvertisedKeysAcc + , sharedChangedAcc' + ) where retainedAcc = sharedRetainedTxs sharedAcc' sharedChangedAcc' = sharedChangedAcc || txKeyWasNew (txKey@(TxKey k), txKeyWasNew, sharedAcc') = lookupOrInternTxId txid sharedAcc - addAdvertiser k peerAdvertisedKeysAcc txEntry@TxEntry { txAdvertiserCount } = - if IntSet.member k peerAdvertisedKeysAcc - then - (False, txEntry, peerAdvertisedKeysAcc) - else - ( True - , txEntry { txAdvertiserCount = txAdvertiserCount + 1 } - , IntSet.insert k peerAdvertisedKeysAcc - ) - lookupOrInternTxId txid st@SharedTxState { sharedTxIdToKey, sharedKeyToTxId, sharedNextTxKey } | Just key <- Map.lookup rawId sharedTxIdToKey = (key, False, st) | otherwise = diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index 5d12448504b..3004672d8f9 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -50,7 +50,6 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.Types , TxOwnerAckState (..) , TxAdvertiser (..) , RequestedTxBatch (..) - , TxAttemptState (..) , TxLease (..) , TxEntry (..) , TxIdsReqFlavour (..) @@ -58,9 +57,8 @@ module Ouroboros.Network.TxSubmission.Inbound.V2.Types , PeerPhase (..) , PeerScore (..) , PeerTxLocalState (..) - , SharedPeerState (..) - , peerGenerationOf - , bumpPeerGenerations + , PeerTxInFlight (..) + , emptyPeerTxInFlight -- TxKey with helper functions , TxKey (..) 
, lookupTxKey @@ -82,7 +80,6 @@ import NoThunks.Class.Orphans () import Data.Map.Strict (Map) import Data.Map.Strict qualified as Map import Data.Sequence.Strict (StrictSeq) -import Data.Set qualified as Set import Data.Time.Clock (diffTimeToPicoseconds) import Data.Typeable (Typeable, eqT, (:~:) (Refl)) import GHC.Generics (Generic) @@ -135,29 +132,6 @@ newtype TxAdvertiser = TxAdvertiser { deriving newtype NFData --- | Per-peer attempt state for one tx body. --- --- V2 keeps the actual tx body in peer-local state, but shared state still --- needs to know whether each advertiser is currently downloading, has a --- buffered body ready, or is submitting it to the mempool. -data TxAttemptState - = -- | The peer is currently downloading the tx body from another peer. - -- The tx body is being fetched and has not yet been received. - TxDownloading - - | -- | The peer has finished downloading the tx body and it is buffered - -- locally, waiting to be submitted to the mempool. - TxBuffered - - | -- | The peer is submitting the tx body to the mempool. This - -- is the final state before the transaction leaves the tracking - -- system (either accepted into the mempool or rejected). - TxSubmitting - deriving stock (Eq, Ord, Show, Generic) - deriving anyclass NFData - -instance NoThunks TxAttemptState - -- | The current download lease for a tx body. -- -- A tx is either currently leased to a peer until a deadline or it is @@ -175,31 +149,86 @@ data TxLease peeraddr = TxLeased !peeraddr !Time -- peer that advertises a new txid becomes its initial owner. If the -- lease expires without a successful outcome, the tx becomes claimable -- and another eligible advertiser may atomically claim it. +-- +-- Per-peer attempt detail is tracked in each peer's 'PeerTxInFlight' +-- TVar (owned by 'withPeer'). The shared entry only carries +-- aggregate counts: how many peers are currently attempting this body +-- and whether any peer is mid mempool submission. 
data TxEntry peeraddr = TxEntry { - -- | Current owner lease for downloading the tx body. + -- | Current owner lease for downloading the tx body. When + -- 'TxClaimable', the embedded 'Time' also doubles as the + -- last-activity stamp used by the orphan sweep. txLease :: !(TxLease peeraddr), - -- | Number of peers that still advertise this tx. - -- - -- The actual advertiser membership is tracked per peer in - -- 'SharedPeerState'. The count is retained here so that hot-path scans can - -- stop once all advertisers have been found. - txAdvertiserCount :: !Int, + -- | Number of peers currently attempting the body (claimed lease + -- and/or buffered locally). Incremented on claim, decremented on + -- omit / lease release / 'PeerSubmitTxs' transition (the + -- submitting peer no longer counts as an attempter). + txAttempt :: !Int, - -- | Current per-peer attempt state for this tx body. - txAttempts :: !(Map peeraddr TxAttemptState), + -- | At least one peer is currently inside @mempoolAddTxs@ for this + -- body. Other peers use this flag, combined with their own + -- 'pifSubmitting' set, to skip 'PeerSubmitTxs' for the same key. + -- STM serialisation guarantees at most one peer is the submitter + -- at any moment. + txInSubmission :: !Bool, -- | Effective per-tx inflight multiplicity cap. -- -- Initialised from 'txInflightMultiplicity' of the policy when the -- entry is created, and bumped by one when a peer's attempt sits past - -- 'inflightTimeout' without reaching 'TxSubmitting', allowing another + -- 'inflightTimeout' without reaching submission, allowing another -- peer to attempt in parallel. currentMaxInflightMultiplicity :: !Int } deriving stock (Eq, Show, Generic) deriving anyclass (NFData, NoThunks) +-- | Per-peer in-flight tracking, owned by 'withPeer'. +-- +-- The peer thread updates this TVar in the same STM transaction as +-- the shared 'TxEntry' so the two stay coherent. 
Two readers: +-- +-- * On exception, 'withPeer's bracket finalizer reads it and reverses +-- each per-peer contribution: releases any held lease, decrements +-- 'txAttempt' for each attempted key, clears 'txInSubmission' for +-- any in-flight submission. +-- +-- * The sweep walks every live peer's TVar to compute the union of +-- advertised keys and skips those entries when looking for orphans +-- to retire (the entry is still wanted by an active peer that +-- may simply be slow to claim). +data PeerTxInFlight = PeerTxInFlight { + -- | Keys this peer currently has in its advertised window. Used + -- by the orphan sweep to know the entry is still wanted, and by + -- 'nextWakeDelay' to scan for the earliest claim wake. + pifAdvertised :: !IntSet, + + -- | Keys this peer currently holds 'TxLeased' on. Cleared on + -- omit, accept, reject, lease-loss and bracket exit. + pifLeased :: !IntSet, + + -- | Keys this peer currently counts toward 'txAttempt'. Spans + -- the @claim -> download -> buffer@ phases; the key moves to + -- 'pifSubmitting' on the @submit@ transition. + pifAttempting :: !IntSet, + + -- | Keys this peer currently counts toward 'txInSubmission'. + -- Set on 'PeerSubmitTxs' / 'markSubmittingTxs', cleared on accept + -- or reject. + pifSubmitting :: !IntSet + } + deriving stock (Eq, Show, Generic) + deriving anyclass (NFData, NoThunks) + +emptyPeerTxInFlight :: PeerTxInFlight +emptyPeerTxInFlight = PeerTxInFlight { + pifAdvertised = IntSet.empty, + pifLeased = IntSet.empty, + pifAttempting = IntSet.empty, + pifSubmitting = IntSet.empty + } + -- | Whether a txid request will be sent as a blocking or pipelined wire -- message. data TxIdsReqFlavour @@ -441,21 +470,11 @@ emptyPeerTxLocalState = PeerTxLocalState { peerScore = emptyPeerScore (Time 0) } --- | Small shared view of peer state used for lease claiming and peer --- selection. 
-data SharedPeerState = SharedPeerState { - sharedPeerAdvertisedTxKeys :: !IntSet, - sharedPeerGeneration :: !Word64 - } - deriving stock (Eq, Show, Generic) - deriving anyclass (NFData, NoThunks) - -- | Shared V2 state. -- -- There is no global decision thread. Peer worker threads coordinate by -- atomically reading and updating this shared state. data SharedTxState peeraddr txid = SharedTxState { - sharedPeers :: !(Map peeraddr SharedPeerState), -- | Active unresolved txs that still participate in leasing, buffering, -- submission and advertiser tracking. sharedTxTable :: !(IntMap (TxEntry peeraddr)), @@ -486,7 +505,6 @@ data RetainedTxs = RetainedTxs { emptySharedTxState :: SharedTxState peeraddr txid emptySharedTxState = SharedTxState { - sharedPeers = Map.empty, sharedTxTable = IntMap.empty, sharedRetainedTxs = retainedEmpty, sharedTxIdToKey = Map.empty, @@ -590,32 +608,6 @@ retainedExpiredKeys currentTime retained = expired {-# INLINE retainedExpiredKeys #-} -peerGenerationOf :: Ord peeraddr - => peeraddr - -> SharedTxState peeraddr txid - -> Word64 -peerGenerationOf peeraddr SharedTxState { sharedPeers } = - case Map.lookup peeraddr sharedPeers of - Just SharedPeerState { sharedPeerGeneration } -> sharedPeerGeneration - Nothing -> error "TxSubmission.V2.peerGenerationOf: missing peer" - -bumpPeerGenerations :: Ord peeraddr - => Set.Set peeraddr - -> SharedTxState peeraddr txid - -> SharedTxState peeraddr txid -bumpPeerGenerations peersToWake st@SharedTxState { sharedPeers } = - st { - sharedPeers = foldl' bumpOne sharedPeers (Set.toList peersToWake) - } - where - bumpOne peersMap peeraddr = - Map.adjust - (\sharedPeerState -> - sharedPeerState - { sharedPeerGeneration = sharedPeerGeneration sharedPeerState + 1 }) - peeraddr - peersMap - lookupTxKey :: HasRawTxId txid => txid -> SharedTxState peeraddr txid diff --git a/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs 
b/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs index e71edcd020e..4f069422748 100644 --- a/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs +++ b/ouroboros-network/orphan-instances/Ouroboros/Network/OrphanInstances.hs @@ -90,7 +90,7 @@ import Ouroboros.Network.Snocket (LocalAddress (..), RemoteAddress) import Ouroboros.Network.TxSubmission.Inbound.V2.Types (ProcessedTxCount (..), SharedTxState (..), TraceTxLogic (..), - TraceTxSubmissionInbound (..), TxAttemptState (..), TxEntry (..), + TraceTxSubmissionInbound (..), TxEntry (..), TxLease (..), TxSubmissionLogicVersion (..), retainedSize, retainedToList) import Ouroboros.Network.TxSubmission.Outbound (TraceTxSubmissionOutbound (..)) @@ -1792,7 +1792,6 @@ traceSharedTxStateToJSON => SharedTxState addr txid -> Value traceSharedTxStateToJSON SharedTxState { - sharedPeers, sharedTxTable, sharedRetainedTxs, sharedTxIdToKey, @@ -1801,20 +1800,13 @@ traceSharedTxStateToJSON SharedTxState { } = object [ "sharedGeneration" .= sharedGeneration - , "peerCount" .= Map.size sharedPeers , "activeTxCount" .= IntMap.size sharedTxTable , "retainedTxCount" .= retainedSize sharedRetainedTxs , "internedTxCount" .= Map.size sharedTxIdToKey , "leasedTxCount" .= leasedTxCount , "claimableTxCount" .= claimableTxCount - , "resolvedTxCount" .= resolvedTxCount - , "downloadingAttemptCount" .= downloadingAttemptCount - , "bufferedAttemptCount" .= bufferedAttemptCount - , "submittingAttemptCount" .= submittingAttemptCount - , "peerPhases" .= peerPhases - , "sharedPeers" .= [ (show peeraddr, show peerState) - | (peeraddr, peerState) <- Map.toList sharedPeers - ] + , "totalAttemptCount" .= totalAttemptCount + , "submittingTxCount" .= submittingTxCount , "sharedTxTable" .= [ (renderTxId txKey, show txEntry) | (txKey, txEntry) <- IntMap.toList sharedTxTable ] @@ -1832,25 +1824,11 @@ traceSharedTxStateToJSON SharedTxState { claimableTxCount = length [ () | TxEntry { txLease = TxClaimable _ } <- 
activeEntries ] - resolvedTxCount = 0 :: Int + totalAttemptCount = + sum [ txAttempt | TxEntry { txAttempt } <- activeEntries ] - downloadingAttemptCount = - sum [ length [ () | TxDownloading <- Map.elems txAttempts ] - | TxEntry { txAttempts } <- activeEntries - ] - - bufferedAttemptCount = - sum [ length [ () | TxBuffered <- Map.elems txAttempts ] - | TxEntry { txAttempts } <- activeEntries - ] - - submittingAttemptCount = - sum [ length [ () | TxSubmitting <- Map.elems txAttempts ] - | TxEntry { txAttempts } <- activeEntries - ] - - peerPhases :: [(String, Int)] - peerPhases = [] + submittingTxCount = + length [ () | TxEntry { txInSubmission = True } <- activeEntries ] renderTxId txKey = maybe "" show (IntMap.lookup txKey sharedKeyToTxId) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/Diffusion/Node/Kernel.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/Diffusion/Node/Kernel.hs index 51c765cb4bb..c0a5ac392c8 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/Diffusion/Node/Kernel.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/Diffusion/Node/Kernel.hs @@ -83,8 +83,9 @@ import Ouroboros.Network.PeerSharing (PeerSharingAPI, PeerSharingRegistry (..), ps_POLICY_PEER_SHARE_MAX_PEERS, ps_POLICY_PEER_SHARE_STICKY_TIME) import Ouroboros.Network.Protocol.Handshake.Unversioned import Ouroboros.Network.Snocket (TestAddress (..)) -import Ouroboros.Network.TxSubmission.Inbound.V2.Registry (SharedTxStateVar, - TxSubmissionCountersVar, newSharedTxStateVar, +import Ouroboros.Network.TxSubmission.Inbound.V2.Registry + (PeerTxInFlightRegistry, SharedTxStateVar, TxSubmissionCountersVar, + newPeerTxInFlightRegistry, newSharedTxStateVar, newTxSubmissionCountersVar) import Ouroboros.Network.TxSubmission.Inbound.V2.Types (emptySharedTxState) @@ -322,7 +323,10 @@ data NodeKernel header block s txid m = NodeKernel { :: TxSubmissionCountersVar m, nkSharedTxStateVar - :: SharedTxStateVar m NtNAddr txid + :: SharedTxStateVar m NtNAddr txid, 
+ + nkPeerTxInFlightRegistry + :: PeerTxInFlightRegistry m NtNAddr } newNodeKernel :: ( MonadTraceSTM m @@ -352,6 +356,7 @@ newNodeKernel psRng _txSeed txs = do <*> newMempool txs <*> newTxSubmissionCountersVar mempty <*> newSharedTxStateVar emptySharedTxState + <*> newPeerTxInFlightRegistry -- | Register a new upstream chain-sync client. -- diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs index 5483ce414b6..b3414be5ce9 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs @@ -185,6 +185,7 @@ runTxSubmission tracer _tracerTxLogic st0 peerImpairmentMap txDecisionPolicy = d duplicateTxIdsVar <- Lazy.newTVarIO [] sharedTxStateVar <- newSharedTxStateVar emptySharedTxState + inFlightRegistry <- newPeerTxInFlightRegistry txCountersVar <- newTxSubmissionCountersVar mempty traceTVarIO sharedTxStateVar \_ -> return . TraceDynamic . TxStateTrace labelTVarIO sharedTxStateVar "shared-tx-state" @@ -212,6 +213,7 @@ runTxSubmission tracer _tracerTxLogic st0 peerImpairmentMap txDecisionPolicy = d withPeer txDecisionPolicy (getMempoolReader inboundMempool) sharedTxStateVar + inFlightRegistry txCountersVar addr $ \api -> do let server = diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 39f22fc41a4..74c876960a1 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -7,7 +7,6 @@ module Test.Ouroboros.Network.TxSubmission.TxLogic , ArbTxDecisionPolicy (..) , PeerAddr , ArbSharedTxState (..) - , ArbSharedPeerState (..) , ArbPeerTxLocalState (..) 
, ReceiveDuplicateFixture , PeerActionFixture @@ -21,6 +20,7 @@ module Test.Ouroboros.Network.TxSubmission.TxLogic , runFanoutLoop , sharedTxStateInvariant , peerTxLocalStateInvariant + , peerTxInFlightInvariant , combinedStateInvariant , InvariantStrength (..) ) where @@ -34,7 +34,7 @@ import Data.IntMap.Strict qualified as IntMap import Data.IntSet qualified as IntSet import Data.List (elemIndex, mapAccumL, nub, nubBy, sortBy) import Data.Map.Strict qualified as Map -import Data.Maybe (isJust, listToMaybe) +import Data.Maybe (listToMaybe) import Data.Sequence.Strict qualified as StrictSeq import Data.Set qualified as Set import Data.Word (Word64) @@ -44,7 +44,7 @@ import NoThunks.Class (NoThunks, unsafeNoThunks) import Ouroboros.Network.Protocol.TxSubmission2.Type -import Ouroboros.Network.Tx (HasRawTxId (..), RawTxId, getRawTxId) +import Ouroboros.Network.Tx (HasRawTxId (..), getRawTxId) import Ouroboros.Network.TxSubmission.Inbound.V2.Policy import Ouroboros.Network.TxSubmission.Inbound.V2.State import Ouroboros.Network.TxSubmission.Inbound.V2.Types @@ -71,35 +71,32 @@ tests = , testProperty "shrink does not contain the original value" prop_TriggerScenario_shrinkExcludesOriginal ] - , testProperty "handleReceivedTxIds handles mixed new / retained / mempool txids" prop_handleReceivedTxIds - , testCaseSteps "handleReceivedTxIds adds the current peer as an advertiser for active txs" unit_handleReceivedTxIds_addsAdvertiserForActiveTxs - , testCaseSteps "nextPeerAction lets another peer claim a fresh tx when the first advertiser is full" unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull - , testProperty "handleReceivedTxs handles mixed buffered / omitted / late-retained / late-mempool / pruned txids" prop_handleReceivedTxs - , testProperty "handleSubmittedTxs retains accepted and drops rejected" prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected , testProperty "nextPeerAction processes all multi-peer triggers" prop_nextPeerAction_processesAllTriggers - 
, testProperty "nextPeerAction claims claimable tx for best idle advertiser" prop_nextPeerAction_claimsClaimableTx - , testCaseSteps "nextPeerAction claims a released tx from another advertiser" unit_nextPeerAction_claimsRejectedTxFromOtherAdvertiser - , testCaseSteps "nextPeerAction claims a tx once the score delay threshold has elapsed" unit_nextPeerAction_claimsAtScoreDelayThreshold , testCaseSteps "peerScore decays linearly over time at scoreRate" unit_peerScore_decaysOverTime , testCaseSteps "applyPeerRejections drains the existing score before adding the new rejection count" unit_applyPeerRejections_drainsThenAdds - , testProperty "nextPeerAction picks txs respecting the inflight size budget" prop_nextPeerAction_picksTxsRespectingBudget - , testCaseSteps "nextPeerAction skips blocked available txs and requests later claimable ones" unit_nextPeerAction_skipsBlockedAvailableTxs + , testProperty "handleReceivedTxIds classifies incoming txids" prop_handleReceivedTxIds + , testCaseSteps "handleReceivedTxIds tracks the advertise without mutating an existing entry" unit_handleReceivedTxIds_advertisesExistingEntry + , testProperty "handleReceivedTxs buffers requested bodies and releases omitted ones" prop_handleReceivedTxs + , testProperty "handleSubmittedTxs retains accepted and drops rejected" prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected + , testProperty "nextPeerAction returns the current shared generation when idle" prop_nextPeerAction_returnsSharedGeneration + , testProperty "nextPeerAction respects the inflight size budget" prop_nextPeerAction_picksTxsRespectingBudget , testProperty "nextPeerAction submits buffered owned txs before acking" prop_nextPeerAction_ownerSubmitsBuffered - , testCaseSteps "nextPeerAction requests other txs despite a blocked buffered tx" unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx - , testCaseSteps "nextPeerAction only acks the safe prefix before a blocked buffered tx" 
unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx - , testProperty "nextPeerAction keeps non-owner txids unacked until resolved" prop_nextPeerAction_nonOwnerWaitsUntilResolved - , testProperty "nextPeerActionPipelined suppresses ack-only txid requests" prop_nextPeerActionPipelined_requiresAckAndReq - , testProperty "nextPeerActionPipelined requests txids when it can ack and request" prop_nextPeerActionPipelined_requestsTxIds - , testCaseSteps "nextPeerActionPipelined keeps one txid unacked while body replies are in flight" unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply - , testProperty "nextPeerActionPipelined opens a second outstanding body batch" prop_nextPeerActionPipelined_secondBodyBatch - , testProperty "nextPeerActionPipelined does not open a third outstanding body batch" prop_nextPeerActionPipelined_noThirdBodyBatch , testProperty "nextPeerAction prunes expired retained txs" prop_nextPeerAction_prunesExpiredRetained , testProperty "nextPeerAction keeps retained txs before expiry" prop_nextPeerAction_keepsRetained - , testProperty "PeerDoNothing waits for the earliest shared expiry" prop_nextPeerAction_earliestWakeDelay - , testProperty "PeerDoNothing uses the current peer generation" prop_nextPeerAction_returnsPeerGeneration - , testProperty "handleSubmittedTxs bumps advertiser generations" prop_handleSubmittedTxs_bumpsAdvertisers - , testCaseSteps "advertisingPeersForTxExcept scans the authoritative peer key sets" unit_advertisingPeersForTxExcept_scansAuthoritativePeerSets - , testCaseSteps "removeAdvertisingPeersForResolvedTx clears all advertising peers for a resolved key" unit_removeAdvertisingPeersForResolvedTx_clearsAllAdvertisingPeers + , testProperty "nextPeerActionPipelined suppresses ack-only or request-only txid messages" prop_nextPeerActionPipelined_requiresAckAndReq + , testProperty "nextPeerActionPipelined emits a pipelined txid request when ack and request fire together" prop_nextPeerActionPipelined_requestsTxIds 
+ , testProperty "nextPeerActionPipelined opens a second outstanding body batch" prop_nextPeerActionPipelined_secondBodyBatch + , testProperty "nextPeerActionPipelined does not open a third outstanding body batch" prop_nextPeerActionPipelined_noThirdBodyBatch + , testProperty "nextPeerAction's PeerDoNothing carries the earliest scheduled wake" prop_nextPeerAction_earliestWakeDelay + , testCaseSteps "nextPeerActionPipelined keeps one txid unacked while body replies are in flight" unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply + , testCaseSteps "nextPeerAction skips blocked-available txs and requests later claimable ones" unit_nextPeerAction_skipsBlockedAvailableTxs + , testCaseSteps "nextPeerAction only acks the safe prefix before a blocked buffered tx" unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx + , testCaseSteps "nextPeerAction lets another peer claim a fresh tx when the first advertiser is full" unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull + , testCaseSteps "nextPeerAction claims a released tx from another advertiser" unit_nextPeerAction_claimsRejectedTxFromOtherAdvertiser + , testCaseSteps "nextPeerAction claims a tx once the score delay threshold has elapsed" unit_nextPeerAction_claimsAtScoreDelayThreshold + , testCaseSteps "nextPeerAction requests other work despite a blocked buffered tx" unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx + , testProperty "nextPeerAction keeps non-owner txids unacked until resolved" prop_nextPeerAction_nonOwnerWaitsUntilResolved + , testProperty "nextPeerAction claims a claimable tx for the best idle advertiser" prop_nextPeerAction_claimsClaimableTx ] -- @@ -115,26 +112,6 @@ checkNoThunks name val = (name ++ " contains thunks: " ++ show info) (property False) --- | HUnit equivalent of 'checkNoThunks'. 
-assertNoThunks :: NoThunks a => String -> a -> Assertion -assertNoThunks name val = - val `seq` case unsafeNoThunks val of - Nothing -> pure () - Just info -> assertFailure (name ++ " contains thunks: " ++ show info) - --- | Evaluate a 'Property' once (no QuickCheck shrinking) and convert --- the verdict into an 'Assertion'. Useful for invariant helpers like --- 'combinedStateInvariant' that return 'Property' so they're directly --- usable from 'testProperty', but need bridging to 'testCaseSteps'. -assertProperty :: Testable prop => String -> prop -> Assertion -assertProperty name prop = do - result <- quickCheckWithResult - stdArgs { chatty = False, maxSuccess = 1 } - prop - case result of - Success {} -> pure () - _ -> assertFailure (name ++ ": " ++ output result) - -- -- InboundState properties -- @@ -201,6 +178,9 @@ data InvariantStrength = WeakInvariant -- lands in downloaded). -- * @peerRequestedTxs@ equals the union of all @requestedTxBatchSet@s -- and @peerRequestedTxsSize@ equals the sum of all batch sizes. +-- * @peerAdvertisedTxKeys@ is a subset of the unacknowledged queue (a +-- peer can only advertise keys it has actually received and not yet +-- acked). peerTxLocalStateInvariant :: forall tx. TxDecisionPolicy @@ -247,17 +227,45 @@ peerTxLocalStateInvariant TxDecisionPolicy { scoreMax } batchSizeSum = sum (fmap requestedTxBatchSize (toList peerRequestedTxBatches)) --- | Combined 'SharedTxState' / 'PeerTxLocalState' invariant. +-- | Per-peer 'PeerTxInFlight' invariant. +-- +-- A peer's attempting and submitting sets must be disjoint (a key is +-- in at most one phase per peer). Lease and submission stake imply +-- the peer is also tracking the key as advertised. 
+peerTxInFlightInvariant :: PeerTxInFlight -> Property +peerTxInFlightInvariant pif = + conjoin + [ counterexample "pifAttempting and pifSubmitting overlap" + (IntSet.null (pifAttempting pif `IntSet.intersection` pifSubmitting pif)) + , counterexample "pifLeased not contained in pifAttempting ∪ pifSubmitting" + (property (pifLeased pif + `IntSet.isSubsetOf` + (pifAttempting pif `IntSet.union` pifSubmitting pif))) + ] + +-- | Combined invariant over @SharedTxState@ and a snapshot of every +-- live peer's @(PeerTxLocalState, PeerTxInFlight)@. -- --- Runs the individual invariants on each piece and adds the cross-state --- coherence constraints: +-- Runs the per-piece invariants ('peerTxLocalStateInvariant', +-- 'peerTxInFlightInvariant', 'sharedTxStateInvariant') and adds the +-- cross-state coherence constraints that need both shared and per-peer +-- views to express: -- --- * The peer's 'sharedPeerAdvertisedTxKeys' (as recorded in the shared --- state) are a subset of the peer's local unacknowledged queue — a peer --- can only advertise keys it has actually received. --- * Those advertised keys must have a matching entry in 'sharedTxTable': --- an advertisement without an active tx entry is an orphan and would --- leave 'txAdvertiserCount' out of sync with the peer key sets. +-- * Every key in any peer's 'pifAdvertised' references an active +-- entry or a retained one (never falls out of both). +-- * Each entry's 'txAttempt' equals the number of peers in the map +-- whose 'pifAttempting' contains the key. +-- * Each entry's 'txInSubmission' is true iff some peer in the map +-- has the key in its 'pifSubmitting'. +-- * If an entry is 'TxLeased peer _' and that peer is in the map, +-- the peer's 'pifLeased' contains the key. Stale leases (peer's +-- 'pifLeased' has a key that the entry no longer leases to them) +-- are tolerated since theft via inflight-cap bumps does not +-- proactively scrub the loser's set. 
+-- +-- Single-peer call sites pass a @Map.singleton@; multi-peer scenarios +-- pass the full schedule snapshot so the cross-peer counter checks +-- are exercised. combinedStateInvariant :: forall peeraddr txid tx. ( Ord peeraddr @@ -268,26 +276,54 @@ combinedStateInvariant ) => TxDecisionPolicy -> InvariantStrength - -> peeraddr - -> PeerTxLocalState tx + -> Map.Map peeraddr (PeerTxLocalState tx, PeerTxInFlight) -> SharedTxState peeraddr txid -> Property -combinedStateInvariant policy strength peeraddr peerState sharedState = - conjoin - [ peerTxLocalStateInvariant policy peerState - , sharedTxStateInvariant strength sharedState - , counterexample "advertised keys escape the peer's unacknowledged queue" - (property (advertisedKeys `IntSet.isSubsetOf` unackKeys)) - , counterexample "advertised keys have no matching sharedTxTable entry" - (property (advertisedKeys `IntSet.isSubsetOf` IntMap.keysSet (sharedTxTable sharedState))) - ] - where - unackKeys = - IntSet.fromList [ k | TxKey k <- toList (peerUnacknowledgedTxIds peerState) ] - advertisedKeys = - maybe IntSet.empty sharedPeerAdvertisedTxKeys - (Map.lookup peeraddr (sharedPeers sharedState)) - +combinedStateInvariant policy strength peers sharedState = + let activeKeys = IntMap.keysSet (sharedTxTable sharedState) + retKeys = retainedKeysSet (sharedRetainedTxs sharedState) + liveKeys = activeKeys `IntSet.union` retKeys + in conjoin $ + [ counterexample ("PeerTxLocalState invariant violated for peer " + ++ show p) + (peerTxLocalStateInvariant policy ps) + | (p, (ps, _)) <- Map.toList peers ] + ++ [ counterexample ("PeerTxInFlight invariant violated for peer " + ++ show p) + (peerTxInFlightInvariant pif) + | (p, (_, pif)) <- Map.toList peers ] + ++ [ sharedTxStateInvariant strength sharedState ] + ++ [ counterexample ("peer " ++ show p + ++ " advertises keys that are neither active nor retained") + (property (pifAdvertised pif `IntSet.isSubsetOf` liveKeys)) + | (p, (_, pif)) <- Map.toList peers ] + ++ [ 
counterexample ("txAttempt mismatch for entry " ++ show k + ++ ": expected " ++ show expectedAttempt + ++ " (sum across peers' pifAttempting)" + ++ ", got " ++ show (txAttempt entry)) + (txAttempt entry === expectedAttempt) + | (k, entry) <- IntMap.toList (sharedTxTable sharedState) + , let expectedAttempt = + sum [ if IntSet.member k (pifAttempting pif) then 1 else 0 + | (_, (_, pif)) <- Map.toList peers ] + ] + ++ [ counterexample ("txInSubmission mismatch for entry " ++ show k + ++ ": expected " ++ show expectedSubmit) + (txInSubmission entry === expectedSubmit) + | (k, entry) <- IntMap.toList (sharedTxTable sharedState) + , let expectedSubmit = + any (\(_, (_, pif)) -> IntSet.member k (pifSubmitting pif)) + (Map.toList peers) + ] + ++ [ counterexample ("entry " ++ show k + ++ " is TxLeased to peer " ++ show owner + ++ " but the peer's pifLeased does not contain it") + (case Map.lookup owner peers of + Nothing -> property True + Just (_, ownerPif) -> property (IntSet.member k (pifLeased ownerPif))) + | (k, entry) <- IntMap.toList (sharedTxTable sharedState) + , TxLeased owner _ <- [txLease entry] + ] -- | 'InboundState` invariant. @@ -304,7 +340,6 @@ sharedTxStateInvariant -> SharedTxState peeraddr txid -> Property sharedTxStateInvariant strength SharedTxState { - sharedPeers, sharedTxTable, sharedRetainedTxs, sharedTxIdToKey, @@ -324,17 +359,17 @@ sharedTxStateInvariant strength SharedTxState { (property (all (< sharedNextTxKey) (IntSet.toList liveKeys))) ] ++ case strength of - WeakInvariant -> - fmap checkTxEntry activeEntries - StrongInvariant -> - fmap checkTxEntry activeEntries - ++ [ counterexample "active tx entry without any liveness source" - (property (all activeEntryLive (IntMap.elems sharedTxTable))) - ] + -- Both strengths now run the per-entry well-formedness check. 
+ -- "Self-evident liveness" (lease or txAttempt > 0 or + -- txInSubmission) is no longer a shared-state invariant: an + -- entry can sit TxClaimable with 'txAttempt = 0' while a live + -- peer's 'pifAdvertised' keeps it from being swept. The sweep + -- itself enforces global liveness; per-peer invariants live in + -- 'combinedStateInvariant'. + _ -> fmap checkTxEntry activeEntries where liveKeys = IntMap.keysSet sharedTxTable `IntSet.union` retainedKeysSet sharedRetainedTxs activeEntries = IntMap.toList sharedTxTable - knownPeers = Map.keysSet sharedPeers keysRoundTripForward = all (\(rawId, txKey) -> fmap getRawTxId (IntMap.lookup (unTxKey txKey) sharedKeyToTxId) == Just rawId) @@ -344,33 +379,13 @@ sharedTxStateInvariant strength SharedTxState { all (\(k, txid) -> Map.lookup (getRawTxId txid) sharedTxIdToKey == Just (TxKey k)) (IntMap.toList sharedKeyToTxId) - advertisersForKey k = - Map.keysSet $ - Map.filter - (\SharedPeerState { sharedPeerAdvertisedTxKeys } -> - IntSet.member k sharedPeerAdvertisedTxKeys) - sharedPeers - - activeEntryLive TxEntry { txLease, txAdvertiserCount, txAttempts } = - leaseLive txLease - || txAdvertiserCount > 0 - || not (Map.null txAttempts) - - leaseLive TxClaimable {} = False - leaseLive TxLeased {} = True - - checkTxEntry (k, txEntry@TxEntry { txLease, txAdvertiserCount, txAttempts }) = + checkTxEntry (k, txEntry@TxEntry { txAttempt, txInSubmission }) = counterexample ("bad active tx entry " ++ show k ++ ": " ++ show txEntry) $ - let txAdvertisers = advertisersForKey k in conjoin - [ property (txAdvertisers `Set.isSubsetOf` knownPeers) - , txAdvertiserCount === Set.size txAdvertisers - , property (Map.keysSet txAttempts `Set.isSubsetOf` txAdvertisers) - , case txLease of - TxClaimable _ -> - property True - TxLeased owner _ -> - property (Map.member owner sharedPeers && Set.member owner txAdvertisers) + [ counterexample "txAttempt is negative" + (property (txAttempt >= 0)) + , counterexample "txInSubmission without any peer in 
submission" + (property (not txInSubmission || txAttempt >= 0)) ] newtype ArbTxDecisionPolicy = ArbTxDecisionPolicy TxDecisionPolicy @@ -379,9 +394,6 @@ newtype ArbTxDecisionPolicy = ArbTxDecisionPolicy TxDecisionPolicy newtype ArbSharedTxState = ArbSharedTxState (SharedTxState PeerAddr TxId) deriving Show -newtype ArbSharedPeerState = ArbSharedPeerState SharedPeerState - deriving Show - newtype ArbPeerTxLocalState = ArbPeerTxLocalState (PeerTxLocalState (Tx TxId)) deriving Show @@ -404,7 +416,6 @@ data TxIdGroupTag = TxIdNew | TxIdRetained | TxIdMempool - | TxIdMempoolResolvesActive deriving (Eq, Ord, Show) instance Arbitrary TxIdGroupTag where @@ -412,73 +423,8 @@ instance Arbitrary TxIdGroupTag where [ (12, pure TxIdNew) , (4, pure TxIdRetained) , (4, pure TxIdMempool) - , (1, pure TxIdMempoolResolvesActive) - ] - --- | Per-requested-txid fate, driving the coherent pre-state for --- 'handleReceivedTxs' properties: --- --- * 'RfBuffered': body is in the reply, entry is active with peeraddr's --- TxDownloading attempt. Expected: attempt flipped to TxBuffered, body --- added to peerDownloadedTxs. @Bool@ = co-advertised by another peer. --- * 'RfOmitted': body is not in the reply, entry is active with peeraddr's --- TxDownloading attempt. Expected: lease released; entry survives if --- co-advertised, otherwise reaped by dropDeadActiveKeys. @Bool@ = --- co-advertised. --- * 'RfLateRetained': body is in the reply, but the key is already in --- sharedRetainedTxs (no sharedTxTable entry). Expected: body dropped, --- lateCount incremented, no state change beyond the usual peer bookkeeping. --- * 'RfLateMempool': body is in the reply, entry is active with peeraddr's --- TxDownloading attempt, and the callback reports the tx as already in --- the mempool. Expected: body dropped, entry moved from sharedTxTable --- to sharedRetainedTxs, advertising stripped, any other advertiser woken. 
--- * 'RfOmittedPruned': body is not in the reply, and the key is not in --- sharedState at all (fully pruned by some concurrent cleanup before --- the reply arrives). Expected: omittedCount incremented, no shared-state --- change (@keyWasLive@ is False so 'handleOmitted' takes the count-only --- branch). -data RequestedFate - = RfBuffered !Bool - | RfOmitted !Bool - | RfLateRetained - | RfLateMempool - | RfOmittedPruned - deriving (Eq, Show) - -instance Arbitrary RequestedFate where - arbitrary = frequency - [ (4, pure (RfBuffered False)) - , (2, pure (RfBuffered True)) - , (3, pure (RfOmitted False)) - , (2, pure (RfOmitted True)) - , (1, pure RfLateRetained) - , (1, pure RfLateMempool) - , (1, pure RfOmittedPruned) ] -rfInReply :: RequestedFate -> Bool -rfInReply RfBuffered{} = True -rfInReply RfOmitted{} = False -rfInReply RfLateRetained = True -rfInReply RfLateMempool = True -rfInReply RfOmittedPruned = False - -rfCoAdvertised :: RequestedFate -> Bool -rfCoAdvertised (RfBuffered c) = c -rfCoAdvertised (RfOmitted c) = c -rfCoAdvertised _ = False - -rfGoesToActive :: RequestedFate -> Bool -rfGoesToActive RfBuffered{} = True -rfGoesToActive RfOmitted{} = True -rfGoesToActive RfLateMempool = True -rfGoesToActive RfLateRetained = False -rfGoesToActive RfOmittedPruned = False - -rfIsPruned :: RequestedFate -> Bool -rfIsPruned RfOmittedPruned = True -rfIsPruned _ = False - instance Arbitrary ArbTxDecisionPolicy where arbitrary = frequency @@ -537,19 +483,6 @@ instance Arbitrary ArbTxDecisionPolicy where , realToFrac x > interTxSpace a ] -instance Arbitrary ArbSharedPeerState where - arbitrary = ArbSharedPeerState <$> genSharedPeerState - - shrink (ArbSharedPeerState peerState) - | peerState == defaultPeerState = [] - | otherwise = - [ ArbSharedPeerState defaultPeerState - , ArbSharedPeerState peerState - { sharedPeerGeneration = 0 } - ] - where - defaultPeerState = mkSharedPeerState - instance Arbitrary ArbPeerTxLocalState where arbitrary = ArbPeerTxLocalState <$> 
genPeerTxLocalState @@ -573,1149 +506,1729 @@ instance Arbitrary ArbSharedTxState where | sharedState == emptySharedTxState = [] | otherwise = ArbSharedTxState <$> shrinkSharedTxState sharedState +-- +-- Peer score tests +-- + +-- | 'currentPeerScore' decays the score linearly at 'scoreRate' per second. +unit_peerScore_decaysOverTime :: (String -> IO ()) -> Assertion +unit_peerScore_decaysOverTime step = do + step "After 50 seconds at scoreRate 0.1 a score of 10 should drain to 5" + currentPeerScore policy (Time 50) score0 @?= 5 + step "After 200 seconds the score should be fully decayed (clamped to 0)" + currentPeerScore policy (Time 200) score0 @?= 0 + step "Reading at the same instant as the last update returns the unchanged value" + currentPeerScore policy (Time 0) score0 @?= peerScoreValue score0 + where + policy = defaultTxDecisionPolicy + score0 = PeerScore { peerScoreValue = 10, peerScoreTs = Time 0 } + +-- | A new rejection drains the existing score at 'scoreRate' per second +-- since its last update before adding the rejection count. +unit_applyPeerRejections_drainsThenAdds :: (String -> IO ()) -> Assertion +unit_applyPeerRejections_drainsThenAdds step = do + step "Starting score 10 at Time 0; one rejection 50s later: 10 - (50 * 0.1) + 1 = 6" + fst (applyPeerRejections policy (Time 50) 1 peerState0) @?= 6 + where + policy = defaultTxDecisionPolicy + peerState0 = emptyPeerTxLocalState + { peerScore = PeerScore { peerScoreValue = 10 + , peerScoreTs = Time 0 } } + +-- +-- handleReceivedTxIds +-- --- Verifies that handleReceivedTxIds resolves each incoming txid according to --- its state: +-- | Verifies that 'handleReceivedTxIds' classifies each incoming txid: -- --- * 'TxIdNew' — new claimable entry for @peeraddr@ in sharedTxTable. --- * 'TxIdRetained' — queued locally only; shared state unchanged. --- * 'TxIdMempool' — interned and added to sharedRetainedTxs. 
--- * 'TxIdMempoolResolvesActive' — active entry (advertised by another --- peer) is removed from sharedTxTable, moved to sharedRetainedTxs, the --- other peer's advertising for the key is cleared, and (if idle) its --- generation is bumped. +-- * 'TxIdNew' — interned and a fresh 'TxClaimable' entry is created; +-- the key joins 'peerAvailableTxIds' and 'pifAdvertised'. +-- * 'TxIdRetained' — already in 'sharedRetainedTxs'; no entry created +-- and the key is not added to 'pifAdvertised'. +-- * 'TxIdMempool' — interned, moved straight to 'sharedRetainedTxs' +-- and not added to 'pifAdvertised'. -- --- Also asserts the peer's pre-existing state and unrelated shared state are --- preserved, and that the combined invariant holds before and after. +-- Pre-state is empty peer-local and an otherwise empty shared state with +-- the retained group seeded. Asserts the combined invariant before and +-- after, plus per-group inclusion / exclusion in each piece of state. prop_handleReceivedTxIds :: ArbTxDecisionPolicy - -> ArbSharedTxState - -> ArbPeerTxLocalState -> NonEmptyList (TxId, Positive Int, TxIdGroupTag) - -> Positive Int -> Property -prop_handleReceivedTxIds - (ArbTxDecisionPolicy policy) - (ArbSharedTxState sharedState0) - (ArbPeerTxLocalState peerStateGenerated) - (NonEmpty taggedInput) - (Positive extraRequested) = - forAll (genPeerAddrBiased sharedState0) $ \peeraddr -> - let sharedStateWithPeer = ensurePeerAdvertisesTxKeys peeraddr [] sharedState0 - - -- Canonicalize input: normalise each (txid, size), then dedupe by txid - -- while preserving the first-seen tag. - dedupedTagged :: [((TxId, SizeInBytes), TxIdGroupTag)] - dedupedTagged = - nubBy ((==) `on` (fst . fst)) - [ ((abs txid + 1, mkSize txSize), tag) - | (txid, txSize, tag) <- taggedInput - ] +prop_handleReceivedTxIds (ArbTxDecisionPolicy policy) (NonEmpty taggedInput) = + let + -- Normalise: positive txids, non-zero sizes; dedupe by txid. 
+ normalised :: [(TxId, SizeInBytes, TxIdGroupTag)] + normalised = + nubBy ((==) `on` (\(t,_,_) -> t)) + [ (abs txid + 1, mkSize sz, tag) + | (txid, sz, tag) <- taggedInput + ] - -- Shift all txids forward so they are disjoint from sharedStateWithPeer's - -- intern table and from each other. Preserves input order and tag mapping. - freshenedTxids :: [(TxId, SizeInBytes)] - freshenedTxids = - freshBatchAgainstSharedState sharedStateWithPeer (fmap fst dedupedTagged) - - taggedFreshened :: [((TxId, SizeInBytes), TxIdGroupTag)] - taggedFreshened = zip freshenedTxids (fmap snd dedupedTagged) - - (newGroup, retainedGroup, mempoolFreshGroup, resolveActiveCandidates) = - foldr partitionByTag ([], [], [], []) taggedFreshened - where - partitionByTag (e, TxIdNew) (n, r, m, a) = (e:n, r, m, a) - partitionByTag (e, TxIdRetained) (n, r, m, a) = (n, e:r, m, a) - partitionByTag (e, TxIdMempool) (n, r, m, a) = (n, r, e:m, a) - partitionByTag (e, TxIdMempoolResolvesActive) (n, r, m, a) = (n, r, m, e:a) - - -- Seed the retained group into the shared state first: intern the txids - -- and add them to sharedRetainedTxs. - sharedStateWithRetained = seedRetainedTxids policy retainedGroup sharedStateWithPeer - - -- Pick an advertiser peer for the resolve-active sub-group, if any peer - -- other than @peeraddr@ exists. If none is available, demote the - -- resolve-active candidates to fresh mempool entries so they still - -- exercise the mempool branch. 
- otherPeerOpt :: Maybe PeerAddr - otherPeerOpt = - case filter (/= peeraddr) - (Map.keys (sharedPeers sharedStateWithRetained)) of - [] -> Nothing - (p:_) -> Just p - - (mempoolResolveActiveGroup, mempoolGroup, sharedStateBase) = - case otherPeerOpt of - Just p | not (null resolveActiveCandidates) -> - ( resolveActiveCandidates - , mempoolFreshGroup - , seedActiveTxidsForOtherPeer p resolveActiveCandidates - sharedStateWithRetained - ) - _ -> - ( [] - , mempoolFreshGroup ++ resolveActiveCandidates - , sharedStateWithRetained - ) - - -- The full input list, in original (interleaved) order. - txidsAndSizes :: [(TxId, SizeInBytes)] - txidsAndSizes = freshenedTxids - - mempoolTxidSet :: Set.Set TxId - mempoolTxidSet = - Set.fromList - (fmap fst mempoolGroup ++ fmap fst mempoolResolveActiveGroup) - mempoolHasTx :: TxId -> Bool - mempoolHasTx = (`Set.member` mempoolTxidSet) - - oldKeys = IntMap.keysSet (sharedKeyToTxId sharedStateBase) - -- Keys that are in sharedStateBase's intern table AND whose sharedTxTable - -- entry is about to be removed by the mempool branch. We exclude them - -- from the "unchanged at old keys" assertions; their behaviour is - -- covered explicitly by checkMempoolResolveActiveEntry. - resolveActiveKeySet :: IntSet.IntSet - resolveActiveKeySet = - IntSet.fromList - [ unTxKey (lookupKeyOrFail txid sharedStateBase) - | (txid, _) <- mempoolResolveActiveGroup - ] - stableOldKeys = oldKeys `IntSet.difference` resolveActiveKeySet - requestedToReply = fromIntegral (length txidsAndSizes) - -- Shift generated peer-local keys past everything that - -- handleReceivedTxIds touches, so the pre-existing peer-local keys stay - -- disjoint from both the base state and the newly-allocated keys. 
- peerKeyShift = sharedNextTxKey sharedStateBase + length txidsAndSizes - preExistingAdvertised = - sharedPeerAdvertisedTxKeys (lookupPeerOrFail peeraddr sharedStateBase) - advertisedPrefix = - StrictSeq.fromList [ TxKey k | k <- IntSet.toList preExistingAdvertised ] - peerStateShifted = shiftPeerTxLocalStateKeys peerKeyShift peerStateGenerated - peerState0 = - peerStateShifted { - peerUnacknowledgedTxIds = - advertisedPrefix <> peerUnacknowledgedTxIds peerStateShifted, - peerRequestedTxIds = requestedToReply + fromIntegral extraRequested - } - oldPeerAvailableKeys = IntMap.keysSet (peerAvailableTxIds peerState0) - (peerState', sharedState') = - handleReceivedTxIds mempoolHasTx now policy peeraddr requestedToReply txidsAndSizes peerState0 sharedStateBase - - expectedRetainUntil = - addTime (bufferedTxsMinLifetime policy) now - - -- Only the new group extends the peer's advertised-key set. - expectedAdvertisedKeys = - preExistingAdvertised - `IntSet.union` - IntSet.fromList - [ unTxKey (lookupKeyOrFail txid sharedState') - | (txid, _) <- newGroup - ] + txidsAndSizes :: [(TxId, SizeInBytes)] + txidsAndSizes = [ (txid, sz) | (txid, sz, _) <- normalised ] - -- Keys newly interned during the call (new + mempool-fresh). Retained - -- and resolve-active keys were pre-interned by the seed helpers. - expectedNextTxKeyAdvance = length newGroup + length mempoolGroup - - -- The generation bumps iff the call actually changed shared state. Pure - -- retained-only input leaves shared state untouched. - expectedGenerationAdvance :: Word64 - expectedGenerationAdvance - | null newGroup && null mempoolGroup && null mempoolResolveActiveGroup = 0 - | otherwise = 1 - - -- Peers whose entry may differ between sharedStateBase and sharedState': - -- always @peeraddr@, and also the chosen advertiser if the mempool - -- branch resolved any active entry. 
- affectedPeers :: Set.Set PeerAddr - affectedPeers = - Set.insert peeraddr $ - case otherPeerOpt of - Just p | not (null mempoolResolveActiveGroup) -> Set.singleton p - _ -> Set.empty - - checkNewEntry (txid, size) = - let k = unTxKey (lookupKeyOrFail txid sharedState') in - case IntMap.lookup k (sharedTxTable sharedState') of - Nothing -> - counterexample ("missing new tx entry for " ++ show txid) (property False) - Just TxEntry { txLease, txAdvertiserCount, txAttempts } -> - conjoin - [ txLease === TxClaimable now - , txAdvertiserCount === 1 - , txAttempts === (Map.empty :: Map.Map PeerAddr TxAttemptState) - , counterexample "new txid missing from peerAvailableTxIds" - (IntMap.lookup k (peerAvailableTxIds peerState') === Just size) - , counterexample "new txid missing from peer advertised keys" - (property (IntSet.member k - (sharedPeerAdvertisedTxKeys - (lookupPeerOrFail peeraddr sharedState')))) - ] + newGroup = [ (txid, sz) | (txid, sz, TxIdNew) <- normalised ] + retainedGroup = [ (txid, sz) | (txid, sz, TxIdRetained) <- normalised ] + mempoolGroup = [ (txid, sz) | (txid, sz, TxIdMempool) <- normalised ] - checkRetainedEntry (txid, _) = - let k = unTxKey (lookupKeyOrFail txid sharedState') in - conjoin - [ counterexample "retained txid appears in sharedTxTable" - (property (IntMap.notMember k (sharedTxTable sharedState'))) - , counterexample "retained txid disappeared from sharedRetainedTxs" - (property (retainedMember k (sharedRetainedTxs sharedState'))) - , counterexample "retained txid leaked into peerAvailableTxIds" - (property (IntMap.notMember k (peerAvailableTxIds peerState'))) - , counterexample "retained txid leaked into peer advertised keys" - (property (IntSet.notMember k - (sharedPeerAdvertisedTxKeys - (lookupPeerOrFail peeraddr sharedState')))) - ] + mempoolHasTx :: TxId -> Bool + mempoolHasTx txid = txid `Set.member` Set.fromList (fmap fst mempoolGroup) - checkMempoolEntry (txid, _) = - let k = unTxKey (lookupKeyOrFail txid sharedState') in - 
conjoin - [ counterexample "mempool txid leaked into sharedTxTable" - (property (IntMap.notMember k (sharedTxTable sharedState'))) - , counterexample "mempool txid missing or wrong retain-until in sharedRetainedTxs" - (retainedLookup k (sharedRetainedTxs sharedState') - === Just expectedRetainUntil) - , counterexample "mempool txid leaked into peerAvailableTxIds" - (property (IntMap.notMember k (peerAvailableTxIds peerState'))) - , counterexample "mempool txid leaked into peer advertised keys" - (property (IntSet.notMember k - (sharedPeerAdvertisedTxKeys - (lookupPeerOrFail peeraddr sharedState')))) - ] + sharedState0 = seedRetainedTxids policy retainedGroup emptySharedTxState - -- A resolve-active entry was in sharedTxTable (advertised by an "other" - -- peer) before the call. The mempool branch deletes it from - -- sharedTxTable, inserts it into sharedRetainedTxs, and clears the - -- "other" peer's advertising for that key. peeraddr never advertised - -- the key (keys are freshened out of peeraddr's advertised range). 
- checkMempoolResolveActiveEntry (txid, _) = - let k = unTxKey (lookupKeyOrFail txid sharedState') in - conjoin - [ counterexample "resolve-active txid remained in sharedTxTable" - (property (IntMap.notMember k (sharedTxTable sharedState'))) - , counterexample "resolve-active txid missing or wrong retain-until" - (retainedLookup k (sharedRetainedTxs sharedState') - === Just expectedRetainUntil) - , counterexample "resolve-active txid leaked into peerAvailableTxIds" - (property (IntMap.notMember k (peerAvailableTxIds peerState'))) - , counterexample "resolve-active txid leaked into peer advertised keys" - (property (IntSet.notMember k - (sharedPeerAdvertisedTxKeys - (lookupPeerOrFail peeraddr sharedState')))) - , case otherPeerOpt of - Nothing -> property True - Just op -> - counterexample "other advertiser still lists resolve-active key" - (property (IntSet.notMember k - (sharedPeerAdvertisedTxKeys - (lookupPeerOrFail op sharedState')))) - ] + requestedToReply = fromIntegral (length txidsAndSizes) + peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = requestedToReply } + peerInFlight0 = emptyPeerTxInFlight + + (peerState', peerInFlight', sharedState') = + handleReceivedTxIds mempoolHasTx now policy + requestedToReply txidsAndSizes + peerState0 peerInFlight0 sharedState0 - -- If the resolve-active group is non-empty, the chosen @otherPeer@'s - -- post-call state should have the resolve-active keys stripped from its - -- advertising (reverting to what it advertised in sharedStateWithRetained) - -- and its generation bumped by 1 iff its phase is PeerIdle. 
- checkOtherPeerState = - case otherPeerOpt of - Just op | not (null mempoolResolveActiveGroup) -> - let original = lookupPeerOrFail op sharedStateWithRetained - post = lookupPeerOrFail op sharedState' in - conjoin - [ counterexample "other peer's advertised keys not restored" - (sharedPeerAdvertisedTxKeys post - === sharedPeerAdvertisedTxKeys original) - , counterexample "other peer's generation bump mismatch" - (sharedPeerGeneration post - === sharedPeerGeneration original + 1) + keyOf txid = unTxKey (lookupKeyOrFail txid sharedState') + + expectedAdvertisedKeys = + IntSet.fromList [ keyOf txid | (txid, _) <- newGroup ] + + expectedAvailableTxIds = + IntMap.fromList [ (keyOf txid, sz) | (txid, sz) <- newGroup ] + + expectedUnacked = + [ lookupKeyOrFail txid sharedState' | (txid, _) <- txidsAndSizes ] + + retainUntil = addTime (bufferedTxsMinLifetime policy) now + + checkNew (txid, _) = + let k = keyOf txid in + counterexample ("new tx " ++ show txid) $ conjoin + [ counterexample "missing TxClaimable entry" + (case IntMap.lookup k (sharedTxTable sharedState') of + Just txEntry -> conjoin + [ txLease txEntry === TxClaimable now + , txAttempt txEntry === 0 + , property (not (txInSubmission txEntry)) ] - _ -> property True - - checkExistingTxId (rawId, txKey) = - Map.lookup rawId (sharedTxIdToKey sharedState') === Just txKey in - classify (StrictSeq.null (peerUnacknowledgedTxIds peerStateGenerated)) - "generated peer-local state: empty unacknowledged queue" $ - classify (not (Map.member peeraddr (sharedPeers sharedState0))) - "peeraddr: fresh (not in generated sharedState)" $ - classify (not (IntSet.null preExistingAdvertised)) - "peeraddr: has pre-existing advertised keys" $ - classify (length txidsAndSizes /= length taggedInput) - "received txids: reduced by dedupe or fresh-shift" $ - classify (not (null newGroup)) "txids include new" $ - classify (not (null retainedGroup)) "txids include retained" $ - classify (not (null mempoolGroup)) "txids include mempool" $ - 
classify (not (null mempoolResolveActiveGroup)) "txids include resolve-active" $ - tabulate "received txids" [bucket (length txidsAndSizes)] $ - tabulate "new group" [bucket (length newGroup)] $ - tabulate "retained group" [bucket (length retainedGroup)] $ - tabulate "mempool group" [bucket (length mempoolGroup)] $ - tabulate "resolve-active group" [bucket (length mempoolResolveActiveGroup)] $ - tabulate "sharedState peers" [bucket (Map.size (sharedPeers sharedStateBase))] $ - tabulate "active txs" [bucket (IntMap.size (sharedTxTable sharedStateBase))] $ - tabulate "retained txs" [bucket (retainedSize (sharedRetainedTxs sharedStateBase))] $ + Nothing -> property False) + , counterexample "expected to be in retained" $ + property (not (retainedMember k (sharedRetainedTxs sharedState'))) + ] + + checkRetained (txid, _) = + let k = keyOf txid in + counterexample ("retained tx " ++ show txid) $ conjoin + [ counterexample "leaked into sharedTxTable" + (property (IntMap.notMember k (sharedTxTable sharedState'))) + , counterexample "missing from retained" + (property (retainedMember k (sharedRetainedTxs sharedState'))) + , counterexample "leaked into pifAdvertised" + (property (IntSet.notMember k (pifAdvertised peerInFlight'))) + ] + + checkMempool (txid, _) = + let k = keyOf txid in + counterexample ("mempool tx " ++ show txid) $ conjoin + [ counterexample "leaked into sharedTxTable" + (property (IntMap.notMember k (sharedTxTable sharedState'))) + , counterexample "missing or wrong retainUntil" + (retainedLookup k (sharedRetainedTxs sharedState') === Just retainUntil) + , counterexample "leaked into pifAdvertised" + (property (IntSet.notMember k (pifAdvertised peerInFlight'))) + ] + in + classify (not (null newGroup)) "txids include new" $ + classify (not (null retainedGroup)) "txids include retained" $ + classify (not (null mempoolGroup)) "txids include mempool" $ conjoin - [ peerRequestedTxIds peerState' === fromIntegral extraRequested - , toList 
(peerUnacknowledgedTxIds peerState') - === toList (peerUnacknowledgedTxIds peerState0) - ++ fmap (\(txid, _) -> lookupKeyOrFail txid sharedState') txidsAndSizes - , IntMap.size (peerAvailableTxIds peerState') - === IntMap.size (peerAvailableTxIds peerState0) + length newGroup - , IntMap.restrictKeys (peerAvailableTxIds peerState') oldPeerAvailableKeys - === peerAvailableTxIds peerState0 - , peerRequestedTxs peerState' === peerRequestedTxs peerState0 - , peerRequestedTxBatches peerState' === peerRequestedTxBatches peerState0 - , peerRequestedTxsSize peerState' === peerRequestedTxsSize peerState0 - , peerDownloadedTxs peerState' === peerDownloadedTxs peerState0 - , peerDownloadStartTime peerState' === peerDownloadStartTime peerState0 - , peerScore peerState' === peerScore peerState0 - , Map.withoutKeys (sharedPeers sharedState') affectedPeers - === Map.withoutKeys (sharedPeers sharedStateBase) affectedPeers - , sharedPeerAdvertisedTxKeys (lookupPeerOrFail peeraddr sharedState') - === expectedAdvertisedKeys - , IntMap.restrictKeys (sharedTxTable sharedState') stableOldKeys - === IntMap.restrictKeys (sharedTxTable sharedStateBase) stableOldKeys - , retainedRestrictKeys (sharedRetainedTxs sharedState') stableOldKeys - === retainedRestrictKeys (sharedRetainedTxs sharedStateBase) stableOldKeys - , IntMap.restrictKeys (sharedKeyToTxId sharedState') oldKeys - === sharedKeyToTxId sharedStateBase - , sharedGeneration sharedState' - === sharedGeneration sharedStateBase + expectedGenerationAdvance - , sharedNextTxKey sharedState' - === sharedNextTxKey sharedStateBase + expectedNextTxKeyAdvance - , conjoin (fmap checkExistingTxId (Map.toList (sharedTxIdToKey sharedStateBase))) - , conjoin (fmap checkNewEntry newGroup) - , conjoin (fmap checkRetainedEntry retainedGroup) - , conjoin (fmap checkMempoolEntry mempoolGroup) - , conjoin (fmap checkMempoolResolveActiveEntry mempoolResolveActiveGroup) - , checkOtherPeerState - , counterexample "combined invariant violated before the 
call" - (combinedStateInvariant policy StrongInvariant peeraddr peerState0 sharedStateBase) - , counterexample "combined invariant violated after the call" - (combinedStateInvariant policy StrongInvariant peeraddr peerState' sharedState') - , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "sharedState'" sharedState' + [ counterexample "unacknowledged queue mismatch" + (toList (peerUnacknowledgedTxIds peerState') === expectedUnacked) + , counterexample "peerAvailableTxIds mismatch" + (peerAvailableTxIds peerState' === expectedAvailableTxIds) + , counterexample "pifAdvertised mismatch" + (pifAdvertised peerInFlight' === expectedAdvertisedKeys) + , counterexample "peerRequestedTxIds was not consumed" + (peerRequestedTxIds peerState' === 0) + , conjoin (fmap checkNew newGroup) + , conjoin (fmap checkRetained retainedGroup) + , conjoin (fmap checkMempool mempoolGroup) + , combinedStateInvariant policy WeakInvariant + (Map.singleton peerAddr (peerState', peerInFlight')) sharedState' ] - -unit_handleReceivedTxIds_addsAdvertiserForActiveTxs :: (String -> IO ()) -> Assertion -unit_handleReceivedTxIds_addsAdvertiserForActiveTxs step = do - step "Run handleReceivedTxIds for a peer advertising txids that are already active via other peers" - let (peerState', sharedState') = - handleReceivedTxIds - (const False) - now - defaultTxDecisionPolicy - peeraddr - requestedTxIds - txidsAndSizes - peerState0 - sharedState0 - step "Assert the local peer now tracks the txids as unacknowledged and available" - toList (peerUnacknowledgedTxIds peerState') @?= txKeys - peerAvailableTxIds peerState' @?= expectedAvailableTxs - step "Assert the shared peer view and advertiser counts were updated once per tx" - sharedPeerAdvertisedTxKeys (lookupPeerOrFail peeraddr sharedState') @?= expectedAdvertisedKeys - map (txAdvertiserCount . (`lookupEntryOrFail` sharedState')) txKeys - @?= map ((+ 1) . txAdvertiserCount . 
(`lookupEntryOrFail` sharedState0)) txKeys - step "Assert unrelated peers and shared mappings are unchanged apart from the generation bump" - Map.delete peeraddr (sharedPeers sharedState') @?= Map.delete peeraddr (sharedPeers sharedState0) - sharedTxIdToKey sharedState' @?= sharedTxIdToKey sharedState0 - sharedKeyToTxId sharedState' @?= sharedKeyToTxId sharedState0 - sharedGeneration sharedState' @?= sharedGeneration sharedState0 + 1 where - ReceiveDuplicateFixture - { rdfPeerAddr = peeraddr - , rdfRequestedTxIds = requestedTxIds - , rdfTxidsAndSizes = txidsAndSizes - , rdfPeerState = peerState0 - , rdfSharedState = sharedState0 - } = mkReceiveDuplicateFixture 4 3 + peerAddr = 1 :: PeerAddr + +-- | When a peer advertises a txid whose entry already exists, the entry +-- itself is unchanged: the per-peer 'PeerTxInFlight' simply records the +-- new advertisement. The new model has no 'txAdvertiserCount' to bump. +unit_handleReceivedTxIds_advertisesExistingEntry :: (String -> IO ()) -> Assertion +unit_handleReceivedTxIds_advertisesExistingEntry step = do + step "Set up a sharedTxTable entry leased to peer 0" + let txid :: TxId + txid = 7 + existing = mkActiveSharedState [0] 0 [] [(txid, 256)] + peerAddr2 :: PeerAddr + peerAddr2 = 1 + entryBefore = lookupEntryOrFail (lookupKeyOrFail txid existing) existing + + step "Peer 1 advertises the same txid" + let (peerState', peerInFlight', sharedState') = + handleReceivedTxIds (const False) now defaultTxDecisionPolicy + 1 [(txid, 256)] + emptyPeerTxLocalState { peerRequestedTxIds = 1 } + emptyPeerTxInFlight + existing + + step "Entry in sharedTxTable is unchanged" + let entryAfter = lookupEntryOrFail (lookupKeyOrFail txid sharedState') sharedState' + entryAfter @?= entryBefore + + step "Peer 1 now tracks the txid as advertised + available" + let k = unTxKey (lookupKeyOrFail txid sharedState') + pifAdvertised peerInFlight' @?= IntSet.singleton k + IntMap.keys (peerAvailableTxIds peerState') @?= [k] + toList 
(peerUnacknowledgedTxIds peerState') @?= [TxKey k] + -- peerAddr2 is referenced to keep the variable in scope for clarity. + _ <- pure peerAddr2 + pure () - txKeys = fmap (`lookupKeyOrFail` sharedState0) (fmap fst txidsAndSizes) - expectedAvailableTxs = - IntMap.fromList - [ (unTxKey txKey, txSize) - | (txid, txSize) <- txidsAndSizes - , let txKey = lookupKeyOrFail txid sharedState0 +-- +-- handleReceivedTxs +-- + +-- | Verifies that 'handleReceivedTxs' buffers requested bodies and +-- releases omitted ones, decrementing 'txAttempt' for each. The +-- pre-state is built by manually claiming each requested key. +prop_handleReceivedTxs + :: ArbTxDecisionPolicy + -> NonEmptyList (TxId, Positive Int, Bool) + -> Property +prop_handleReceivedTxs (ArbTxDecisionPolicy policy) + (NonEmpty rawInput) = + let + normalised :: [(TxId, SizeInBytes, Bool)] + normalised = + nubBy ((==) `on` (\(t,_,_) -> t)) + [ (abs txid + 1, mkSize sz, inReply) + | (txid, sz, inReply) <- rawInput ] - expectedAdvertisedKeys = IntSet.fromList (map unTxKey txKeys) -unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull :: (String -> IO ()) -> Assertion -unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull step = do - step "Receive a fresh txid from peer A while A is already at its inflight size limit" - let (peerAState1, sharedState1) = - handleReceivedTxIds - (const False) - now - defaultTxDecisionPolicy - peerA - requestedToReply - [(txid, txSize)] - peerAState0 - sharedState0 - txLease (lookupEntryOrFail key sharedState1) @?= TxClaimable now - let (peerAAction, _, _) = - nextPeerAction now defaultTxDecisionPolicy peerA peerAState1 sharedState1 - step "Run nextPeerAction for peer A and confirm the fresh tx remains unclaimed because A is full" - case peerAAction of - PeerDoNothing _ _ -> pure () - other -> - assertFailure ("unexpected peer A action: " ++ show other) - step "Receive the same txid from peer B and run nextPeerAction for B" - let (peerBState1, sharedState2) = - 
handleReceivedTxIds - (const False) - now - defaultTxDecisionPolicy - peerB - requestedToReply - [(txid, txSize)] - peerBState0 - sharedState1 - (peerBAction, peerBState2, sharedState3) = - nextPeerAction now defaultTxDecisionPolicy peerB peerBState1 sharedState2 - case peerBAction of - PeerRequestTxs txKeys -> do - step "Assert peer B can claim and request the fresh tx immediately" - txKeys @?= [key] - peerRequestedTxs peerBState2 @?= IntSet.singleton k - txLease (lookupEntryOrFail key sharedState3) @?= - TxLeased peerB (addTime (interTxSpace defaultTxDecisionPolicy) now) - other -> - assertFailure ("unexpected peer B action: " ++ show other) - where - peerA = 7 :: PeerAddr - peerB = 8 :: PeerAddr - txid = 1 - txSize = mkSize (Positive 10) - requestedToReply = 1 - key = TxKey 0 - k = unTxKey key + txidsAndSizes :: [(TxId, SizeInBytes)] + txidsAndSizes = [ (txid, sz) | (txid, sz, _) <- normalised ] + + -- 1) Receive the txids on peer 1 to set up advertised + available. sharedState0 = emptySharedTxState - { sharedPeers = Map.fromList - [ (peerA, mkSharedPeerState) - , (peerB, mkSharedPeerState) - ] + requestedToReply = fromIntegral (length txidsAndSizes) + peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = requestedToReply } + (peerState1, peerInFlight1, sharedState1) = + handleReceivedTxIds (const False) now policy + requestedToReply txidsAndSizes + peerState0 emptyPeerTxInFlight sharedState0 + + -- 2) Manually claim each key (simulate pickRequestTxs). 
+ keys = [ lookupKeyOrFail txid sharedState1 | (txid, _) <- txidsAndSizes ] + leaseUntil = addTime (interTxSpace policy) now + + sharedState2 = sharedState1 { + sharedTxTable = + foldl' (\tbl k -> IntMap.adjust (claimEntry k) (unTxKey k) tbl) + (sharedTxTable sharedState1) keys, + sharedGeneration = sharedGeneration sharedState1 + 1 } - peerAState0 = emptyPeerTxLocalState - { peerRequestedTxIds = requestedToReply - , peerRequestedTxsSize = txsSizeInflightPerPeer defaultTxDecisionPolicy + claimEntry _ entry = + entry { + txLease = TxLeased peerAddr leaseUntil, + txAttempt = txAttempt entry + 1 } - peerBState0 = emptyPeerTxLocalState { peerRequestedTxIds = requestedToReply } - --- Verifies that handleReceivedTxs resolves each requested txid according to --- its 'RequestedFate': 'RfBuffered' flips the attempt to 'TxBuffered' and --- inserts the body; 'RfOmitted' releases peeraddr's lease with the entry --- surviving iff co-advertised; 'RfLateRetained' drops the body and counts --- a late reply with no further state change; 'RfLateMempool' drops the --- body, moves the active entry to sharedRetainedTxs, and strips advertising; --- 'RfOmittedPruned' counts a penalty even though the key has already been --- fully pruned from shared state. Aggregate 'omittedCount' and 'lateCount' --- match the group sizes, peer-local fields handleReceivedTxs does not touch --- are preserved, and the combined invariant holds before and after. 
-prop_handleReceivedTxs + + requestedKeySet = IntSet.fromList (fmap unTxKey keys) + batch = mkRequestedTxBatch keys (sum (fmap snd txidsAndSizes)) + peerState2 = peerState1 { + peerRequestedTxs = requestedKeySet, + peerRequestedTxBatches = StrictSeq.singleton batch, + peerRequestedTxsSize = requestedTxBatchSize batch + } + peerInFlight2 = peerInFlight1 { + pifLeased = IntSet.union (pifLeased peerInFlight1) requestedKeySet, + pifAttempting = IntSet.union (pifAttempting peerInFlight1) requestedKeySet + } + + -- 3) Run handleReceivedTxs with only the in-reply subset. + txReply :: [(TxId, Tx TxId)] + txReply = [ (txid, mkTx txid sz) + | (txid, sz, inReply) <- normalised + , inReply + ] + expectedBuffered = IntSet.fromList + [ unTxKey (lookupKeyOrFail txid sharedState1) + | (txid, _, inReply) <- normalised, inReply + ] + expectedOmitted = requestedKeySet `IntSet.difference` expectedBuffered + + (omittedCount, lateCount, peerState3, peerInFlight3, sharedState3) = + handleReceivedTxs (const False) now policy peerAddr txReply + peerState2 peerInFlight2 sharedState2 + in + classify (not (IntSet.null expectedBuffered)) "buffered subset" $ + classify (not (IntSet.null expectedOmitted)) "omitted subset" $ + conjoin + [ counterexample "lateCount should be zero (no retained / mempool branch hit)" + (lateCount === 0) + , counterexample "omittedCount mismatch" + (omittedCount === IntSet.size expectedOmitted) + , counterexample "buffered keys mismatch" + (IntMap.keysSet (peerDownloadedTxs peerState3) === expectedBuffered) + , counterexample "peerRequestedTxs not drained" + (peerRequestedTxs peerState3 === IntSet.empty) + , counterexample "request batch was not dequeued" + (peerRequestedTxBatches peerState3 === StrictSeq.empty) + , counterexample "pifLeased should retain only buffered keys" + (pifLeased peerInFlight3 === expectedBuffered) + , counterexample "pifAttempting should retain only buffered keys" + (pifAttempting peerInFlight3 === expectedBuffered) + , conjoin + [ 
counterexample + ("omitted entry " ++ show k ++ " should sit TxClaimable") $ + case IntMap.lookup k (sharedTxTable sharedState3) of + Just entry -> conjoin + [ case txLease entry of + TxClaimable _ -> property True + other -> counterexample (show other) (property False) + , counterexample "txAttempt not decremented" + (txAttempt entry === 0) + ] + Nothing -> counterexample "entry vanished" (property False) + | k <- IntSet.toList expectedOmitted + ] + , combinedStateInvariant policy WeakInvariant + (Map.singleton peerAddr (peerState3, peerInFlight3)) sharedState3 + ] + where + peerAddr = 1 :: PeerAddr + +-- +-- handleSubmittedTxs +-- + +-- | Accepted txs move into 'sharedRetainedTxs'; rejected txs stay in +-- the active table with 'TxClaimable' lease and cleared +-- 'txInSubmission'; the peer's 'pifSubmitting' is fully cleared. +prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected :: ArbTxDecisionPolicy - -> ArbSharedTxState - -> ArbPeerTxLocalState - -> NonEmptyList (TxId, Positive Int, RequestedFate) - -> NonNegative Double + -> NonEmptyList (TxId, Positive Int, Bool) -> Property -prop_handleReceivedTxs +prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected (ArbTxDecisionPolicy policy) - (ArbSharedTxState sharedState0) - (ArbPeerTxLocalState peerStateGenerated) - (NonEmpty requestedInput) - (NonNegative initialScore) = - forAll (genPeerAddrBiased sharedState0) $ \peeraddr -> - let sharedStateWithPeer = ensurePeerAdvertisesTxKeys peeraddr [] sharedState0 - - dedupedTagged :: [((TxId, SizeInBytes), RequestedFate)] - dedupedTagged = - nubBy ((==) `on` (fst . 
fst)) - [ ((abs txid + 1, mkSize txSize), fate) - | (txid, txSize, fate) <- requestedInput - ] - - freshenedTxids :: [(TxId, SizeInBytes)] - freshenedTxids = - freshBatchAgainstSharedState sharedStateWithPeer (fmap fst dedupedTagged) - - taggedFreshened :: [((TxId, SizeInBytes), RequestedFate)] - taggedFreshened = zip freshenedTxids (fmap snd dedupedTagged) - - otherPeerOpt :: Maybe PeerAddr - otherPeerOpt = - case filter (/= peeraddr) (Map.keys (sharedPeers sharedStateWithPeer)) of - [] -> Nothing - (p:_) -> Just p - - -- Partition by fate. - bufferedGroup, omittedGroup, lateRetainedGroup, lateMempoolGroup, prunedGroup - :: [((TxId, SizeInBytes), RequestedFate)] - bufferedGroup = [ e | e@(_, RfBuffered{}) <- taggedFreshened ] - omittedGroup = [ e | e@(_, RfOmitted{}) <- taggedFreshened ] - lateRetainedGroup = [ e | e@(_, RfLateRetained) <- taggedFreshened ] - lateMempoolGroup = [ e | e@(_, RfLateMempool) <- taggedFreshened ] - prunedGroup = [ e | e@(_, RfOmittedPruned) <- taggedFreshened ] - - -- Entries that need an active sharedTxTable seed, tagged with whether - -- the entry is co-advertised by otherPeer. LateMempool keys seed an - -- active entry (single advertiser) so the mempool branch of handleOne - -- hits the Just lookup and fires removeAdvertisingPeersForResolvedTx. - activeSeedTagged :: [((TxId, SizeInBytes), Bool)] - activeSeedTagged = - [ (fst e, rfCoAdvertised (snd e)) - | e <- taggedFreshened - , rfGoesToActive (snd e) + (NonEmpty rawInput) = + let + normalised :: [(TxId, SizeInBytes, Bool)] + normalised = + nubBy ((==) `on` (\(t,_,_) -> t)) + [ (abs txid + 1, mkSize sz, accept) + | (txid, sz, accept) <- rawInput ] - -- Seed retained first (no sharedTxTable entry, no advertising), then - -- active on top (leased to peeraddr with a TxDownloading attempt, plus - -- otherPeer advertising for co-advertised ones). Pruned entries are - -- deliberately not seeded: their keys live only in the peer's local - -- bookkeeping. 
- sharedStateWithLateRetained = - seedRetainedTxids policy (fmap fst lateRetainedGroup) sharedStateWithPeer - sharedStateBase = - seedRequestedActiveTxids peeraddr otherPeerOpt activeSeedTagged - sharedStateWithLateRetained - - -- Synthetic keys for pruned entries, chosen to land above every key - -- that 'sharedStateBase' already uses so they don't collide with - -- anything interned. These keys never appear in any shared-state map. - prunedAllocations :: [(((TxId, SizeInBytes), RequestedFate), Int)] - prunedAllocations = - zip prunedGroup [ sharedNextTxKey sharedStateBase + i - | i <- [0 .. length prunedGroup - 1] ] - - -- Resolve each tagged entry to an Int key: interned entries look their - -- key up in sharedStateBase; pruned entries use their synthetic key. - prunedKeyByTxId :: Map.Map TxId Int - prunedKeyByTxId = - Map.fromList [ (txid, k) | (((txid, _), _), k) <- prunedAllocations ] - - keyIntOf :: ((TxId, SizeInBytes), RequestedFate) -> Int - keyIntOf ((txid, _), fate) - | rfIsPruned fate = prunedKeyByTxId Map.! txid - | otherwise = unTxKey (lookupKeyOrFail txid sharedStateBase) - - -- All requested keys, in input order. - requestedKeyInts :: [Int] - requestedKeyInts = [ keyIntOf e | e <- taggedFreshened ] - requestedKeys :: [TxKey] - requestedKeys = fmap TxKey requestedKeyInts - requestedKeysSet :: IntSet.IntSet - requestedKeysSet = IntSet.fromList requestedKeyInts - requestedAvailableMap :: IntMap.IntMap SizeInBytes - requestedAvailableMap = - IntMap.fromList (zip requestedKeyInts (fmap (snd . fst) taggedFreshened)) - requestedTotalSize :: SizeInBytes - requestedTotalSize = sum (fmap (snd . fst) taggedFreshened) - requestedBatch = mkRequestedTxBatch requestedKeys requestedTotalSize - - -- Keys genuinely co-advertised by otherPeer (only Buffered/Omitted tags - -- can be co-advertised and only if otherPeerOpt is Just). 
- coAdvertisedKeys :: IntSet.IntSet - coAdvertisedKeys = - IntSet.fromList - [ keyIntOf e - | e@(_, fate) <- taggedFreshened - , rfCoAdvertised fate - , isJust otherPeerOpt - ] + txidsAndSizes = [ (txid, sz) | (txid, sz, _) <- normalised ] - -- Omitted entries that survive (co-advertised) vs get reaped (solo). - omittedSurvivingKeys, omittedReapedKeys, lateMempoolKeys :: IntSet.IntSet - omittedSurvivingKeys = - IntSet.fromList [ keyIntOf e | e <- omittedGroup, keyIntOf e `IntSet.member` coAdvertisedKeys ] - omittedReapedKeys = - IntSet.fromList [ keyIntOf e | e <- omittedGroup, not (keyIntOf e `IntSet.member` coAdvertisedKeys) ] - lateMempoolKeys = - IntSet.fromList [ keyIntOf e | e <- lateMempoolGroup ] - - -- mempoolHasTx returns True exactly for the LateMempool group's txids. - mempoolTxidSet :: Set.Set TxId - mempoolTxidSet = Set.fromList (fmap (fst . fst) lateMempoolGroup) - mempoolHasTxFn :: TxId -> Bool - mempoolHasTxFn = (`Set.member` mempoolTxidSet) - - -- Body list submitted to handleReceivedTxs: every in-reply entry. - receivedBodies :: [(TxId, Tx TxId)] - receivedBodies = - [ (txid, mkTx txid size) - | ((txid, size), fate) <- taggedFreshened - , rfInReply fate - ] + -- Pre-state: every key leased to peerAddr with txInSubmission set + -- and the key already counted in 'pifSubmitting'. + sharedState0 = mkActiveSharedState [peerAddr] peerAddr [] txidsAndSizes + keys = [ lookupKeyOrFail txid sharedState0 | (txid, _) <- txidsAndSizes ] + keySet = IntSet.fromList (fmap unTxKey keys) - -- Peer-local state. Shift the generator's keys out of range, then - -- prepend advertised-from-sharedState0 keys plus our requested keys to - -- the unack queue, and overwrite the request bookkeeping with a single - -- batch of our requested keys (handleReceivedTxs only processes the - -- head batch). The shift must also land above the pruned keys, which - -- were allocated starting at 'sharedNextTxKey sharedStateBase'. 
- peerKeyShift = - sharedNextTxKey sharedStateBase + length prunedGroup + 1 - preExistingAdvertised = - sharedPeerAdvertisedTxKeys - (lookupPeerOrFail peeraddr sharedStateWithPeer) - unackPrefix :: [TxKey] - unackPrefix = - [ TxKey k | k <- IntSet.toList preExistingAdvertised ] - ++ requestedKeys - peerStateShifted = shiftPeerTxLocalStateKeys peerKeyShift peerStateGenerated - peerState0 = peerStateShifted { - peerUnacknowledgedTxIds = - StrictSeq.fromList unackPrefix - <> peerUnacknowledgedTxIds peerStateShifted, - peerAvailableTxIds = requestedAvailableMap, - peerRequestedTxs = requestedKeysSet, - peerRequestedTxBatches = StrictSeq.singleton requestedBatch, - peerRequestedTxsSize = requestedTotalSize, - peerScore = PeerScore (min initialScore (scoreMax policy)) - (Time 0) - } + flipToSubmitting entry = + entry { txAttempt = 0, txInSubmission = True } + sharedState1 = sharedState0 { + sharedTxTable = + IntSet.foldl' (flip (IntMap.adjust flipToSubmitting)) + (sharedTxTable sharedState0) + keySet + } - (omittedCount, lateCount, peerState', sharedState') = - handleReceivedTxs mempoolHasTxFn now policy peeraddr - receivedBodies peerState0 sharedStateBase + peerInFlight0 = emptyPeerTxInFlight { + pifLeased = keySet, + pifSubmitting = keySet, + pifAdvertised = keySet + } + peerState0 = emptyPeerTxLocalState { + peerUnacknowledgedTxIds = StrictSeq.fromList keys, + peerAvailableTxIds = + IntMap.fromList [(unTxKey (lookupKeyOrFail txid sharedState1), sz) + | (txid, sz) <- txidsAndSizes], + peerDownloadedTxs = + IntMap.fromList [(unTxKey (lookupKeyOrFail txid sharedState1), + mkTx txid sz) + | (txid, sz) <- txidsAndSizes] + } - penaltyCount = omittedCount + lateCount - peerState'' | penaltyCount == 0 = peerState' - | otherwise = - snd (applyPeerRejections policy now penaltyCount peerState') + accepted = [ lookupKeyOrFail txid sharedState1 + | (txid, _, True) <- normalised ] + rejected = [ lookupKeyOrFail txid sharedState1 + | (txid, _, False) <- normalised ] - 
expectedRetainUntil = - addTime (bufferedTxsMinLifetime policy) now + acceptedKeys = IntSet.fromList (fmap unTxKey accepted) + rejectedKeys = IntSet.fromList (fmap unTxKey rejected) - -- Per-fate assertions. - checkBufferedEntry ((txid, size), _) = - let k = unTxKey (lookupKeyOrFail txid sharedState') in - conjoin - [ counterexample "buffered: peeraddr attempt not TxBuffered" - (fmap (\TxEntry { txAttempts } -> Map.lookup peeraddr txAttempts) - (IntMap.lookup k (sharedTxTable sharedState')) - === Just (Just TxBuffered)) - , counterexample "buffered: body missing from peerDownloadedTxs" - (fmap getTxId (IntMap.lookup k (peerDownloadedTxs peerState')) - === Just txid) - , counterexample "buffered: body has wrong size" - (fmap getTxSize (IntMap.lookup k (peerDownloadedTxs peerState')) - === Just size) - ] + (peerState', peerInFlight', sharedState') = + handleSubmittedTxs now policy peerAddr accepted rejected + peerState0 peerInFlight0 sharedState1 - checkOmittedSurvivingEntry ((txid, _), _) = - let k = unTxKey (lookupKeyOrFail txid sharedState') in + retainUntil = addTime (bufferedTxsMinLifetime policy) now + + checkAccepted k = + counterexample ("accepted " ++ show k) $ conjoin + [ counterexample "still in sharedTxTable" + (property (IntMap.notMember k (sharedTxTable sharedState'))) + , counterexample "missing or wrong retainUntil" + (retainedLookup k (sharedRetainedTxs sharedState') === Just retainUntil) + ] + checkRejected k = + counterexample ("rejected " ++ show k) $ case IntMap.lookup k (sharedTxTable sharedState') of - Nothing -> - counterexample ("co-adv omitted entry was reaped for " ++ show txid) - (property False) - Just TxEntry { txLease, txAdvertiserCount, txAttempts } -> - conjoin - [ counterexample "co-adv omitted: lease not demoted" - (txLease === TxClaimable now) - , counterexample "co-adv omitted: advertiser count not decremented" - (txAdvertiserCount === 1) - , counterexample "co-adv omitted: peeraddr attempt not cleared" - (property (Map.notMember 
peeraddr txAttempts)) - ] + Just entry -> conjoin + [ case txLease entry of + TxClaimable _ -> property True + other -> counterexample (show other) (property False) + , counterexample "txInSubmission still set" + (property (not (txInSubmission entry))) + ] + Nothing -> counterexample "entry vanished" (property False) + in + classify (not (null accepted)) "has accepted" $ + classify (not (null rejected)) "has rejected" $ + conjoin + [ counterexample "pifSubmitting still has keys" + (pifSubmitting peerInFlight' === IntSet.empty) + , counterexample "pifLeased not cleared for submitted keys" + (pifLeased peerInFlight' === IntSet.empty) + , counterexample "pifAdvertised not cleared for submitted keys" + (pifAdvertised peerInFlight' === IntSet.empty) + , counterexample "peerDownloadedTxs not cleared for submitted keys" + (IntMap.keysSet (peerDownloadedTxs peerState') === IntSet.empty) + , conjoin (fmap checkAccepted (IntSet.toList acceptedKeys)) + , conjoin (fmap checkRejected (IntSet.toList rejectedKeys)) + , combinedStateInvariant policy WeakInvariant + (Map.singleton peerAddr (peerState', peerInFlight')) sharedState' + ] + where + peerAddr = 1 :: PeerAddr + +-- +-- nextPeerAction +-- + +-- | An idle peer whose shared state has no work returns +-- 'PeerDoNothing' carrying the current 'sharedGeneration'. +prop_nextPeerAction_returnsSharedGeneration + :: ArbTxDecisionPolicy + -> Word64 + -> Property +prop_nextPeerAction_returnsSharedGeneration (ArbTxDecisionPolicy policy0) gen = + let + -- A zero unack window forces 'pickRequestTxIdsAction' to return + -- 'Nothing', so the scheduler falls through to 'PeerDoNothing'. 
+ policy = policy0 { maxUnacknowledgedTxIds = 0 + , maxNumTxIdsToRequest = 0 } + sharedState :: SharedTxState PeerAddr TxId + sharedState = emptySharedTxState { sharedGeneration = gen } + (action, _, _, _) = + nextPeerAction now policy peerAddr emptyPeerTxLocalState + emptyPeerTxInFlight sharedState + in + case action of + PeerDoNothing g _ -> g === gen + _ -> counterexample (show action) + (property False) + where + peerAddr = 1 :: PeerAddr - checkOmittedReapedEntry ((txid, _), _) = - let k = unTxKey (lookupKeyOrFail txid sharedStateBase) - rawId = getRawTxId txid +-- | When advertised candidates exceed the per-peer size budget, +-- 'pickRequestTxs' picks a contiguous prefix of the unacked queue +-- whose total size fits the budget (with the soft single-tx allowance +-- documented in 'pickRequestTxsAction'). +prop_nextPeerAction_picksTxsRespectingBudget + :: ArbTxDecisionPolicy + -> NonEmptyList (TxId, Positive Int) + -> Property +prop_nextPeerAction_picksTxsRespectingBudget (ArbTxDecisionPolicy policy0) + (NonEmpty rawInput) = + let + -- Tighten the budget so the test exercises the truncation path. 
+ policy = policy0 { txsSizeInflightPerPeer = SizeInBytes 4096 + , maxOutstandingTxBatchesPerPeer = 4 } + + txidsAndSizes :: [(TxId, SizeInBytes)] + txidsAndSizes = + nubBy ((==) `on` fst) + [ (abs txid + 1, SizeInBytes (1 + (fromIntegral n `mod` 2048))) + | (txid, Positive n) <- rawInput + ] + + requestedToReply = fromIntegral (length txidsAndSizes) + peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = requestedToReply } + (peerState1, peerInFlight1, sharedState1) = + handleReceivedTxIds (const False) now policy + requestedToReply txidsAndSizes + peerState0 emptyPeerTxInFlight emptySharedTxState + + (action, _peerState', peerInFlight', sharedState') = + nextPeerAction now policy peerAddr peerState1 peerInFlight1 sharedState1 + + keyOrder = [ unTxKey (lookupKeyOrFail txid sharedState1) + | (txid, _) <- txidsAndSizes ] + sizeOf k = + maybe 0 getSizeInBytes + (IntMap.lookup k (peerAvailableTxIds peerState1)) + + budget = getSizeInBytes (txsSizeInflightPerPeer policy) + isPrefix _ [] = True + isPrefix [] _ = False + isPrefix (x:xs) (y:ys) = x == y && isPrefix xs ys + in + case action of + PeerRequestTxs picked -> + let pickedKeys = fmap unTxKey picked + pickedSize = sum (fmap sizeOf pickedKeys) + -- The first picked tx may exceed the budget on its own + -- (soft budget); subsequent picks must keep the running + -- total at or below the budget. 
+ tailSize = pickedSize - maybe 0 sizeOf (listToMaybe pickedKeys) in conjoin - [ counterexample "reaped omitted: still in sharedTxTable" - (property (IntMap.notMember k (sharedTxTable sharedState'))) - , counterexample "reaped omitted: leaked into sharedRetainedTxs" - (property (not (retainedMember k (sharedRetainedTxs sharedState')))) - , counterexample "reaped omitted: still in sharedKeyToTxId" - (property (IntMap.notMember k (sharedKeyToTxId sharedState'))) - , counterexample "reaped omitted: still in sharedTxIdToKey" - (property (Map.notMember rawId (sharedTxIdToKey sharedState'))) + [ counterexample "picked is not a prefix of the unacked queue" + (property (pickedKeys `isPrefix` keyOrder)) + , counterexample + ("tail of picked exceeds budget: " ++ show pickedSize) + (property (tailSize <= budget)) + , counterexample "picked keys not added to pifLeased" + (property (IntSet.fromList pickedKeys + `IntSet.isSubsetOf` pifLeased peerInFlight')) + , counterexample "claimed entries should be TxLeased to peerAddr" + (conjoin + [ case IntMap.lookup k (sharedTxTable sharedState') of + Just entry -> case txLease entry of + TxLeased owner _ -> owner === peerAddr + other -> counterexample (show other) + (property False) + Nothing -> counterexample "missing entry" + (property False) + | k <- pickedKeys + ]) ] + _ -> + -- If the candidates were empty (e.g. all sizes happened to be + -- the same and dedupe stripped them) the scheduler may pick + -- something else; just check the action type isn't junk. + counterexample (show action) (property True) + where + peerAddr = 1 :: PeerAddr - -- LateRetained: sharedRetainedTxs entry untouched; no sharedTxTable - -- entry exists or is created; body was dropped. 
- checkLateRetainedEntry ((txid, _), _) = - let k = unTxKey (lookupKeyOrFail txid sharedStateBase) in - conjoin - [ counterexample "late-retained: leaked into sharedTxTable" - (property (IntMap.notMember k (sharedTxTable sharedState'))) - , counterexample "late-retained: retain-until mismatch" - (retainedLookup k (sharedRetainedTxs sharedState') - === Just expectedRetainUntil) - , counterexample "late-retained: leaked into peerDownloadedTxs" - (property (IntMap.notMember k (peerDownloadedTxs peerState'))) - ] +-- | When the peer has buffered a downloaded body of which it owns the +-- lease, 'nextPeerAction' submits before requesting more bodies or +-- acking. +prop_nextPeerAction_ownerSubmitsBuffered + :: ArbTxDecisionPolicy + -> Positive Int + -> Property +prop_nextPeerAction_ownerSubmitsBuffered (ArbTxDecisionPolicy policy) + (Positive sizeBytes) = + let + txid :: TxId + txid = 1 + sz = SizeInBytes (1 + fromIntegral (sizeBytes `mod` 1024)) - -- LateMempool: active entry moved from sharedTxTable to - -- sharedRetainedTxs; peeraddr's advertising stripped. - checkLateMempoolEntry ((txid, _), _) = - let k = unTxKey (lookupKeyOrFail txid sharedStateBase) in - conjoin - [ counterexample "late-mempool: still in sharedTxTable" - (property (IntMap.notMember k (sharedTxTable sharedState'))) - , counterexample "late-mempool: retain-until mismatch" - (retainedLookup k (sharedRetainedTxs sharedState') - === Just expectedRetainUntil) - , counterexample "late-mempool: leaked into peerDownloadedTxs" - (property (IntMap.notMember k (peerDownloadedTxs peerState'))) - , counterexample "late-mempool: still in peer advertised keys" - (property (IntSet.notMember k - (sharedPeerAdvertisedTxKeys - (lookupPeerOrFail peeraddr sharedState')))) - ] + -- 1) Peer receives the txid (creates the entry). 
+ (peerState1, peerInFlight1, sharedState1) = + handleReceivedTxIds (const False) now policy 1 [(txid, sz)] + (emptyPeerTxLocalState { peerRequestedTxIds = 1 }) + emptyPeerTxInFlight emptySharedTxState - -- Pruned: synthetic key was never interned; handleOmitted takes the - -- count-only branch and leaves shared state untouched. - checkPrunedEntry (((txid, _), _), k) = - let rawId = getRawTxId txid in - conjoin - [ counterexample "pruned: leaked into sharedTxTable" - (property (IntMap.notMember k (sharedTxTable sharedState'))) - , counterexample "pruned: leaked into sharedRetainedTxs" - (property (not (retainedMember k (sharedRetainedTxs sharedState')))) - , counterexample "pruned: leaked into sharedKeyToTxId" - (property (IntMap.notMember k (sharedKeyToTxId sharedState'))) - , counterexample "pruned: leaked into sharedTxIdToKey" - (property (Map.notMember rawId (sharedTxIdToKey sharedState'))) - , counterexample "pruned: leaked into peerDownloadedTxs" - (property (IntMap.notMember k (peerDownloadedTxs peerState'))) - ] + txKey = lookupKeyOrFail txid sharedState1 + keyInt = unTxKey txKey + leaseUntil = addTime (interTxSpace policy) now - -- Expected peeraddr advertised keys post-call: pre-existing plus only - -- the buffered group's keys. Omitted-group advertising is stripped by - -- removeOmittedAdvertisedKeys; LateMempool advertising is stripped by - -- removeAdvertisingPeersForResolvedTx; LateRetained never advertised. - expectedPeerAdvertisedPost = - preExistingAdvertised - `IntSet.union` - IntSet.fromList - [ unTxKey (lookupKeyOrFail txid sharedState') - | ((txid, _), _) <- bufferedGroup - ] + -- 2) Manually claim and buffer the body (simulating + -- pickRequestTxsAction + handleReceivedTxs's buffered branch). 
+ sharedState2 = sharedState1 { + sharedTxTable = + IntMap.adjust + (\entry -> entry { + txLease = TxLeased peerAddr leaseUntil, + txAttempt = txAttempt entry + 1 + }) + keyInt (sharedTxTable sharedState1) + } + peerState2 = peerState1 { + peerDownloadedTxs = IntMap.singleton keyInt (mkTx txid sz) + } + peerInFlight2 = peerInFlight1 { + pifLeased = IntSet.insert keyInt (pifLeased peerInFlight1), + pifAttempting = IntSet.insert keyInt (pifAttempting peerInFlight1) + } - -- otherPeer is added to wakePeers when any surviving omitted entry - -- exists; its generation is bumped unconditionally. LateMempool is - -- set up single-advertiser so it does not wake otherPeer here. - checkOtherPeerState = - case otherPeerOpt of - Just op | not (IntSet.null omittedSurvivingKeys) -> - let original = lookupPeerOrFail op sharedStateBase - post = lookupPeerOrFail op sharedState' in - conjoin - [ counterexample "other peer's advertised keys changed" - (sharedPeerAdvertisedTxKeys post - === sharedPeerAdvertisedTxKeys original) - , counterexample "other peer's generation bump mismatch" - (sharedPeerGeneration post - === sharedPeerGeneration original + 1) - ] - _ -> property True - - affectedPeers :: Set.Set PeerAddr - affectedPeers = - Set.insert peeraddr $ - case otherPeerOpt of - Just op | not (IntSet.null omittedSurvivingKeys) -> Set.singleton op - _ -> Set.empty - - -- Key sets used for the "unchanged on stable old keys" assertions. - -- Every requested key has its sharedTxTable slot touched (added, - -- removed, or modified), so they are all excluded from stableForTxTable. - -- sharedKeyToTxId only loses reaped keys; sharedRetainedTxs only gains - -- LateMempool keys (and keeps the LateRetained pre-seed entries - -- unchanged). 
- oldKeys = IntMap.keysSet (sharedKeyToTxId sharedStateBase) - stableForKeyMaps = oldKeys `IntSet.difference` omittedReapedKeys - stableForTxTable = oldKeys `IntSet.difference` requestedKeysSet - stableForRetained = oldKeys `IntSet.difference` lateMempoolKeys - - -- Generated peerDownloadedTxs keys (shifted) should survive untouched. - genDownloadedKeys = - IntMap.keysSet (peerDownloadedTxs peerStateShifted) in - classify (StrictSeq.null (peerUnacknowledgedTxIds peerStateGenerated)) - "generated peer-local state: empty unacknowledged queue" $ - classify (not (Map.member peeraddr (sharedPeers sharedState0))) - "peeraddr: fresh (not in generated sharedState)" $ - classify (not (IntSet.null coAdvertisedKeys)) - "requested txs include co-advertised" $ - classify (not (null bufferedGroup)) "requested txs include buffered" $ - classify (not (null omittedGroup)) "requested txs include omitted" $ - classify (not (IntSet.null omittedSurvivingKeys)) - "requested txs include omitted + surviving" $ - classify (not (IntSet.null omittedReapedKeys)) - "requested txs include omitted + reaped" $ - classify (not (null lateRetainedGroup)) "requested txs include late-retained" $ - classify (not (null lateMempoolGroup)) "requested txs include late-mempool" $ - classify (not (null prunedGroup)) "requested txs include pruned" $ - tabulate "requested" [bucket (length freshenedTxids)] $ - tabulate "buffered" [bucket (length bufferedGroup)] $ - tabulate "omitted" [bucket (length omittedGroup)] $ - tabulate "late-retained" [bucket (length lateRetainedGroup)] $ - tabulate "late-mempool" [bucket (length lateMempoolGroup)] $ - tabulate "pruned" [bucket (length prunedGroup)] $ - tabulate "sharedState peers" [bucket (Map.size (sharedPeers sharedStateBase))] $ - conjoin - [ omittedCount === length omittedGroup + length prunedGroup - , lateCount === length lateRetainedGroup + length lateMempoolGroup - , peerRequestedTxs peerState' === IntSet.empty - , peerRequestedTxBatches peerState' === 
StrictSeq.empty - , peerRequestedTxsSize peerState' === 0 - , peerAvailableTxIds peerState' === IntMap.empty - , peerUnacknowledgedTxIds peerState' === peerUnacknowledgedTxIds peerState0 - , peerRequestedTxIds peerState' === peerRequestedTxIds peerState0 - , peerDownloadStartTime peerState' === peerDownloadStartTime peerState0 - , peerScore peerState' === peerScore peerState0 - , counterexample "generated peerDownloadedTxs entries not preserved" - (IntMap.restrictKeys (peerDownloadedTxs peerState') genDownloadedKeys - === peerDownloadedTxs peerStateShifted) - , counterexample "buffered bodies not all inserted into peerDownloadedTxs" - (IntMap.restrictKeys (peerDownloadedTxs peerState') - (IntSet.fromList - [ unTxKey (lookupKeyOrFail txid sharedState') - | ((txid, _), _) <- bufferedGroup - ]) - === IntMap.fromList - [ (unTxKey (lookupKeyOrFail txid sharedState'), mkTx txid size) - | ((txid, size), _) <- bufferedGroup - ]) - , Map.withoutKeys (sharedPeers sharedState') affectedPeers - === Map.withoutKeys (sharedPeers sharedStateBase) affectedPeers - , sharedPeerAdvertisedTxKeys (lookupPeerOrFail peeraddr sharedState') - === expectedPeerAdvertisedPost - , IntMap.restrictKeys (sharedTxTable sharedState') stableForTxTable - === IntMap.restrictKeys (sharedTxTable sharedStateBase) stableForTxTable - , IntMap.restrictKeys (sharedKeyToTxId sharedState') stableForKeyMaps - === IntMap.restrictKeys (sharedKeyToTxId sharedStateBase) stableForKeyMaps - , retainedRestrictKeys (sharedRetainedTxs sharedState') stableForRetained - === retainedRestrictKeys (sharedRetainedTxs sharedStateBase) stableForRetained - , sharedGeneration sharedState' === sharedGeneration sharedStateBase + 1 - , conjoin (fmap checkBufferedEntry bufferedGroup) - , conjoin [ checkOmittedSurvivingEntry e - | e <- omittedGroup - , keyIntOf e `IntSet.member` coAdvertisedKeys - ] - , conjoin [ checkOmittedReapedEntry e - | e <- omittedGroup - , not (keyIntOf e `IntSet.member` coAdvertisedKeys) - ] - , conjoin (fmap 
checkLateRetainedEntry lateRetainedGroup) - , conjoin (fmap checkLateMempoolEntry lateMempoolGroup) - , conjoin (fmap checkPrunedEntry prunedAllocations) - , checkOtherPeerState - , counterexample "combined invariant violated before the call" - (combinedStateInvariant policy StrongInvariant peeraddr peerState0 sharedStateBase) - , counterexample "combined invariant violated after the call" - (combinedStateInvariant policy StrongInvariant peeraddr peerState' sharedState') - , counterexample "score path: peerScoreValue not as expected" - (if penaltyCount == 0 - then peerScoreValue (peerScore peerState'') - === peerScoreValue (peerScore peerState0) - else peerScoreValue (peerScore peerState'') - === min (scoreMax policy) - (currentPeerScore policy now (peerScore peerState0) - + fromIntegral penaltyCount)) - , counterexample "score path: peerScoreTs not advanced" - (if penaltyCount == 0 - then peerScoreTs (peerScore peerState'') - === peerScoreTs (peerScore peerState0) - else peerScoreTs (peerScore peerState'') === now) - , counterexample "combined invariant violated after score update" - (combinedStateInvariant policy StrongInvariant peeraddr peerState'' sharedState') - , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "sharedState'" sharedState' - ] + (action, _, _, _) = + nextPeerAction now policy peerAddr peerState2 peerInFlight2 sharedState2 + in + case action of + PeerSubmitTxs ks -> ks === [txKey] + _ -> counterexample (show action) (property False) + where + peerAddr = 1 :: PeerAddr --- Verifies that handleSubmittedTxs retains accepted txs and removes rejected --- txs from the active table and tx-key maps. 
Generated over a non-empty --- list of (txid, size, accepted-flag, co-advertised-flag): the --- accepted-flag controls accept vs reject; the co-advertised-flag adds a --- second peer as advertiser, so rejected co-advertised txs stay in --- 'sharedTxTable' (only the calling peer's advertisement is removed) --- while rejected solo txs are dropped from all maps. -prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected +-- | After 'sweepSharedState' fires, retained entries past their +-- deadline are gone. This is a lightweight sanity check on the +-- retention sweep. +prop_nextPeerAction_prunesExpiredRetained :: ArbTxDecisionPolicy -> Positive Int - -> NonEmptyList (TxId, Positive Int, Bool, Bool) -> Property -prop_handleSubmittedTxs_retainsAcceptedAndDropsRejected - (ArbTxDecisionPolicy policy) - (Positive peeraddr) - (NonEmpty rawEntries) = - tabulate "accepted count" [bucket (length acceptedKeys)] - . tabulate "rejected count" [bucket (length rejectedKeys)] - . tabulate "co-advertised count" [bucket (length [() | (_, _, _, True) <- entries])] - $ conjoin $ - [ peerDownloadedTxs peerState' === IntMap.empty - , sharedGeneration sharedState' === sharedGeneration sharedState0 + 1 - , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "sharedState'" sharedState' - ] - ++ - [ counterexample ("accepted txid=" ++ show txid) (acceptedAssertions txid) - | (txid, _, True, _) <- entries - ] - ++ - [ counterexample ("rejected solo txid=" ++ show txid) (rejectedSoloAssertions txid) - | (txid, _, False, False) <- entries - ] - ++ - [ counterexample ("rejected co-advertised txid=" ++ show txid) (rejectedCoAdvAssertions txid) - | (txid, _, False, True) <- entries - ] - where - -- Use a distinct address as the second advertiser. Adding @peeraddr + 1@ - -- guarantees they don't clash even if QC produces consecutive ids. 
- otherPeer = peeraddr + 1 +prop_nextPeerAction_prunesExpiredRetained (ArbTxDecisionPolicy policy) + (Positive nTxids) = + let + n = 1 + (nTxids `mod` 5) + txidsAndSizes = + [ (t, SizeInBytes 64) | t <- [1 .. n] ] + + sharedState0 = seedRetainedTxids policy txidsAndSizes emptySharedTxState + expired = addTime (bufferedTxsMinLifetime policy + 1) now + sharedState' = sweepSharedState expired IntSet.empty sharedState0 + in + counterexample + ("retained set after sweep: " ++ show (retainedSize (sharedRetainedTxs sharedState'))) + (retainedSize (sharedRetainedTxs sharedState') === 0) + +-- | Before the retention deadline expires, retained entries survive a +-- sweep call. +prop_nextPeerAction_keepsRetained + :: ArbTxDecisionPolicy + -> Positive Int + -> Property +prop_nextPeerAction_keepsRetained (ArbTxDecisionPolicy policy) + (Positive nTxids) = + let + n = 1 + (nTxids `mod` 5) + txidsAndSizes = + [ (t, SizeInBytes 64) | t <- [1 .. n] ] + + sharedState0 = seedRetainedTxids policy txidsAndSizes emptySharedTxState + sharedState' = sweepSharedState now IntSet.empty sharedState0 + in + retainedSize (sharedRetainedTxs sharedState') === n - -- Normalise: shift txids to >= 1, convert sizes, dedupe by shifted txid - -- (first occurrence wins). NonEmpty input ensures at least one entry - -- survives. 
- entries :: [(TxId, SizeInBytes, Bool, Bool)] - entries = nubBy ((==) `on` (\(t, _, _, _) -> t)) - $ map (\(t, sz, acc, co) -> (abs t + 1, mkSize sz, acc, co)) rawEntries +-- +-- nextPeerActionPipelined +-- - sharedState0 = - let st = mkSharedState [ txid | (txid, _, _, _) <- entries ] - peeraddrKeys = [ lookupKeyOrFail txid st | (txid, _, _, _) <- entries ] - otherPeerKeys = [ lookupKeyOrFail txid st | (txid, _, _, True) <- entries ] in - ensurePeerAdvertisesTxKeys otherPeer otherPeerKeys - $ ensurePeerAdvertisesTxKeys peeraddr peeraddrKeys - $ st { sharedTxTable = IntMap.fromList - [ (unTxKey (lookupKeyOrFail txid st), - (mkTxEntry peeraddr sz (Just TxBuffered) policy) - { txAdvertiserCount = if co then 2 else 1 }) - | (txid, sz, _, co) <- entries - ] - } +-- | In pipelined mode 'pickRequestTxIdsAction' must return @Nothing@ +-- when either the ack count or the request count is zero (the wire +-- format would otherwise produce an ack-only or request-only +-- pipelined message, which is not allowed). +-- | When the peer has at least one ackable txid AND room to request +-- more, 'nextPeerActionPipelined' emits a 'PeerRequestTxIds' with both +-- counts non-zero. +prop_nextPeerActionPipelined_requestsTxIds + :: ArbTxDecisionPolicy + -> Property +prop_nextPeerActionPipelined_requestsTxIds (ArbTxDecisionPolicy policy0) = + let + policy = policy0 { maxUnacknowledgedTxIds = 8 + , maxNumTxIdsToRequest = 4 } + -- Pre-state: two ackable retained txids (so the keep-one-unacked + -- clamp on pipelined requests still leaves a non-zero ack) and an + -- outstanding pipelined req (peerRequestedTxIds = 1) so the wire + -- format treats the new request as pipelined. 
+ txids = [(1, 64), (2, 64)] :: [(TxId, SizeInBytes)] + sharedState0 = seedRetainedTxids policy txids emptySharedTxState + keys = [ TxKey (unTxKey (lookupKeyOrFail txid sharedState0)) + | (txid, _) <- txids ] + peerState0 = emptyPeerTxLocalState { + peerUnacknowledgedTxIds = StrictSeq.fromList keys, + peerRequestedTxIds = 1 + } + (action, _, _, _) = + nextPeerActionPipelined now policy peerAddr peerState0 + emptyPeerTxInFlight sharedState0 + in + counterexample ("got: " ++ show action) $ + case action of + PeerRequestTxIds TxIdsPipelinedReq ack req -> + conjoin [ counterexample "ack should be non-zero" + (property (ack /= 0)) + , counterexample "req should be non-zero" + (property (req /= 0)) + ] + _ -> property False + where + peerAddr = 1 :: PeerAddr - keyOf txid = lookupKeyOrFail txid sharedState0 - kOf = unTxKey . keyOf +-- | The 'PeerDoNothing' wake-delay is the smallest of: earliest +-- claim-ready time among advertised txs, earliest stuck-bump time +-- among buffered txs, and earliest retention-expiry time. This test +-- focuses on the retention case: an idle peer with nothing else but a +-- single retained tx must wake at that retention deadline. +prop_nextPeerAction_earliestWakeDelay + :: ArbTxDecisionPolicy + -> Positive Int + -> Property +prop_nextPeerAction_earliestWakeDelay (ArbTxDecisionPolicy policy0) + (Positive offsetSec) = + let + -- Force the doNothing branch. 
+ policy = policy0 { maxUnacknowledgedTxIds = 0 + , maxNumTxIdsToRequest = 0 + , bufferedTxsMinLifetime = realToFrac offsetSec } + txid :: TxId + txid = 1 + sharedState0 = seedRetainedTxids policy [(txid, 64)] emptySharedTxState + expectedWake = bufferedTxsMinLifetime policy + (action, _, _, _) = + nextPeerAction now policy peerAddr emptyPeerTxLocalState + emptyPeerTxInFlight sharedState0 + in + counterexample ("got: " ++ show action) $ + case action of + PeerDoNothing _ (Just delay) -> delay === expectedWake + _ -> property False + where + peerAddr = 1 :: PeerAddr - acceptedKeys = [ keyOf txid | (txid, _, True, _) <- entries ] - rejectedKeys = [ keyOf txid | (txid, _, False, _) <- entries ] +-- | While a body reply is still in flight, the protocol requires the +-- peer to keep at least one txid unacked. Verifies that +-- 'pickRequestTxIdsAction' clamps the ack count accordingly when the +-- only ackable txid is the one that should stay unacked. +unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply + :: (String -> IO ()) -> Assertion +unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply step = do + step "Set up a peer with one retained (ackable) txid and an outstanding body batch" + let txid :: TxId + txid = 1 + policy = defaultTxDecisionPolicy + sharedState0 = seedRetainedTxids policy [(txid, 64)] emptySharedTxState + keyInt = unTxKey (lookupKeyOrFail txid sharedState0) + outstandingBatch = mkRequestedTxBatch [TxKey keyInt] 64 + peerState0 = emptyPeerTxLocalState { + peerUnacknowledgedTxIds = StrictSeq.singleton (TxKey keyInt), + peerRequestedTxs = IntSet.singleton keyInt, + peerRequestedTxBatches = StrictSeq.singleton outstandingBatch, + peerRequestedTxsSize = 64 + } - peerState0 = emptyPeerTxLocalState - { peerDownloadedTxs = IntMap.fromList - [ (kOf txid, mkTx txid sz) - | (txid, sz, _, _) <- entries - ] - } - - expectedRetainUntil = addTime (bufferedTxsMinLifetime policy) now - - (peerState', sharedState') = - handleSubmittedTxs 
now policy peeraddr acceptedKeys rejectedKeys - peerState0 sharedState0 - - advertisedKeysOf peer st = - sharedPeerAdvertisedTxKeys (lookupPeerOrFail peer st) - - acceptedAssertions txid = - let k = kOf txid - key = keyOf txid in - conjoin - [ IntMap.lookup k (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) - , retainedLookup k (sharedRetainedTxs sharedState') === Just expectedRetainUntil - , Map.lookup (getRawTxId txid) (sharedTxIdToKey sharedState') === Just key - , IntMap.lookup k (sharedKeyToTxId sharedState') === Just txid - ] + step "Run nextPeerActionPipelined" + let (action, _, _, _) = + nextPeerActionPipelined now policy peerAddr peerState0 + emptyPeerTxInFlight sharedState0 + + step "Pipelined response must keep the txid unacked while the body batch is outstanding" + case action of + PeerRequestTxIds TxIdsPipelinedReq ack _ -> + ack @?= 0 + PeerDoNothing {} -> + -- Acceptable: not enough room to request anything. + pure () + other -> + assertFailure ("unexpected action: " ++ show other) + where + peerAddr = 1 :: PeerAddr - rejectedSoloAssertions txid = - let k = kOf txid in - conjoin - [ IntMap.lookup k (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) - , retainedLookup k (sharedRetainedTxs sharedState') === Nothing - , Map.lookup (getRawTxId txid) (sharedTxIdToKey sharedState') === Nothing - , IntMap.lookup k (sharedKeyToTxId sharedState') === Nothing - ] +prop_nextPeerActionPipelined_requiresAckAndReq + :: ArbTxDecisionPolicy + -> Property +prop_nextPeerActionPipelined_requiresAckAndReq (ArbTxDecisionPolicy policy0) = + let + -- Cap to small windows so the property is easy to reason about. 
+ policy = policy0 { maxUnacknowledgedTxIds = 4 + , maxNumTxIdsToRequest = 4 } + txid :: TxId + txid = 1 + -- Pre-state: peer has one unacked txid that is *not* ackable + -- (entry sits TxClaimable with no peer attempt and the peer is + -- still tracking it as advertised), AND there is already a + -- pipelined request outstanding so 'keepOneUnackedForPipelinedRequest' + -- forces num_acked = 0. + sharedState0 = mkActiveSharedState [peerAddr] peerAddr [] [(txid, 64)] + keyInt = unTxKey (lookupKeyOrFail txid sharedState0) + -- Strip the leaseholder so the entry is TxClaimable, attempt 0. + sharedState1 = sharedState0 { + sharedTxTable = + IntMap.adjust + (\entry -> entry { + txLease = TxClaimable now, + txAttempt = 0 + }) + keyInt (sharedTxTable sharedState0) + } + peerState0 = emptyPeerTxLocalState { + peerUnacknowledgedTxIds = StrictSeq.singleton (TxKey keyInt), + peerAvailableTxIds = IntMap.singleton keyInt 64, + peerRequestedTxIds = 1 -- pretend there's a pipelined req in flight + } + peerInFlight0 = emptyPeerTxInFlight { + pifAdvertised = IntSet.singleton keyInt + } - rejectedCoAdvAssertions txid = - let k = kOf txid - key = keyOf txid in - conjoin - [ counterexample "entry should remain in sharedTxTable" - $ isJust (IntMap.lookup k (sharedTxTable sharedState')) - , counterexample "this peer's advertisement should be removed" - $ not (IntSet.member k (advertisedKeysOf peeraddr sharedState')) - , counterexample "co-advertiser's advertisement should remain" - $ IntSet.member k (advertisedKeysOf otherPeer sharedState') - , Map.lookup (getRawTxId txid) (sharedTxIdToKey sharedState') === Just key - , IntMap.lookup k (sharedKeyToTxId sharedState') === Just txid - , retainedLookup k (sharedRetainedTxs sharedState') === Nothing - ] + (action, _, _, _) = + nextPeerActionPipelined now policy peerAddr peerState0 + peerInFlight0 sharedState1 + in + counterexample ("got: " ++ show action) $ + case action of + PeerRequestTxIds {} -> property False + _ -> property True + where 
+ peerAddr = 1 :: PeerAddr --- | Trigger kinds for the model-based 'nextPeerAction' property. Each --- describes one action that should be choosable at the moment the test --- calls 'nextPeerAction'. The state-builder turns the list into a --- consistent ('PeerTxLocalState', 'SharedTxState') pair. -data ActionTrigger - = TSubmittable TxId (Positive Int) -- buffered + owned + body-downloaded - | TFetchable TxId (Positive Int) -- claimable + advertised + in available - | TAckable TxId -- in 'sharedRetainedTxs' + unacked queue - | TFetchableLater (Positive Int) TxId (Positive Int) - -- ^ delay-in-seconds + txid + size: claimable only after the loop has - -- advanced 'time' by at least the delay; otherwise just like - -- 'TFetchable'. - deriving (Eq, Show) +-- | While a peer holds one outstanding body batch and there is room for +-- another, 'nextPeerActionPipelined' opens the second batch. +prop_nextPeerActionPipelined_secondBodyBatch + :: ArbTxDecisionPolicy + -> Property +prop_nextPeerActionPipelined_secondBodyBatch (ArbTxDecisionPolicy basePolicy) = + let + policy = basePolicy + { maxOutstandingTxBatchesPerPeer = max 2 (maxOutstandingTxBatchesPerPeer basePolicy) + , txsSizeInflightPerPeer = SizeInBytes 4096 + } + txSizeA, txSizeB :: SizeInBytes + txSizeA = SizeInBytes 100 + txSizeB = SizeInBytes 100 + keyA = TxKey 0 + keyB = TxKey 1 + kA = unTxKey keyA + kB = unTxKey keyB + leaseUntil = addTime (interTxSpace policy) now + sharedState = emptySharedTxState + { sharedTxTable = IntMap.fromList + [ (kA, TxEntry + { txLease = TxLeased peerAddr leaseUntil + , txAttempt = 1 + , txInSubmission = False + , currentMaxInflightMultiplicity = txInflightMultiplicity policy + }) + , (kB, TxEntry + { txLease = TxClaimable now + , txAttempt = 0 + , txInSubmission = False + , currentMaxInflightMultiplicity = txInflightMultiplicity policy + }) + ] + , sharedTxIdToKey = Map.fromList [(getRawTxId (1 :: TxId), keyA), (getRawTxId (2 :: TxId), keyB)] + , sharedKeyToTxId = IntMap.fromList 
[(kA, 1 :: TxId), (kB, 2 :: TxId)] + , sharedNextTxKey = 2 + } + peerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.fromList [keyA, keyB] + , peerAvailableTxIds = IntMap.fromList [(kA, txSizeA), (kB, txSizeB)] + , peerRequestedTxs = IntSet.singleton kA + , peerRequestedTxBatches = StrictSeq.singleton (mkRequestedTxBatch [keyA] txSizeA) + , peerRequestedTxsSize = txSizeA + } + peerInFlight0 = emptyPeerTxInFlight + { pifAdvertised = IntSet.fromList [kA, kB] + , pifLeased = IntSet.singleton kA + , pifAttempting = IntSet.singleton kA + } -instance Arbitrary ActionTrigger where - arbitrary = oneof - [ TSubmittable <$> arbitrary <*> arbitrary - , TFetchable <$> arbitrary <*> arbitrary - , TAckable <$> arbitrary - , TFetchableLater <$> arbitrary <*> arbitrary <*> arbitrary - ] - -- Shrink only by demoting to a simpler trigger constructor and at most - -- one numeric-field alternative. Avoids the combinatorial blow-up that - -- recursive 'shrink' on every numeric field produces, which made QC's - -- shrinker hundreds of steps slow on rich trigger lists. 
- shrink (TSubmittable t s) = - [ TFetchable t s, TAckable t ] - ++ [ TSubmittable t' s | t' <- take 1 (shrink t) ] - shrink (TFetchable t s) = - TAckable t : - [ TFetchable t' s | t' <- take 1 (shrink t) ] - shrink (TAckable t) = - [ TAckable t' | t' <- take 1 (shrink t) ] - shrink (TFetchableLater d t s) = - [ TFetchable t s, TAckable t ] -- demote: collapse delay - ++ [ TFetchableLater d' t s | d' <- take 1 (shrink d) ] + (action, _, peerInFlight', sharedState') = + nextPeerActionPipelined now policy peerAddr peerState0 + peerInFlight0 sharedState + in + counterexample ("got: " ++ show action) $ + case action of + PeerRequestTxs [picked] -> + conjoin + [ counterexample "expected to pick keyB" + (picked === keyB) + , counterexample "B not added to pifLeased" + (property (IntSet.member kB (pifLeased peerInFlight'))) + , counterexample "B not added to pifAttempting" + (property (IntSet.member kB (pifAttempting peerInFlight'))) + , counterexample "B not leased to peerAddr" + (case IntMap.lookup kB (sharedTxTable sharedState') of + Just entry -> case txLease entry of + TxLeased owner _ -> owner === peerAddr + _ -> property False + Nothing -> property False) + ] + _ -> property False + where + peerAddr = 7 :: PeerAddr -triggerTxid :: ActionTrigger -> TxId -triggerTxid (TSubmittable t _) = t -triggerTxid (TFetchable t _) = t -triggerTxid (TAckable t) = t -triggerTxid (TFetchableLater _ t _) = t +-- | When the peer already holds 'maxOutstandingTxBatchesPerPeer' +-- outstanding body batches, 'nextPeerActionPipelined' refuses to open +-- another even with available candidates. 
+prop_nextPeerActionPipelined_noThirdBodyBatch + :: ArbTxDecisionPolicy + -> Property +prop_nextPeerActionPipelined_noThirdBodyBatch (ArbTxDecisionPolicy basePolicy) = + let + policy = basePolicy { maxOutstandingTxBatchesPerPeer = 2 } + txSize :: SizeInBytes + txSize = SizeInBytes 64 + keyA = TxKey 0 + keyB = TxKey 1 + keyC = TxKey 2 + kA = unTxKey keyA + kB = unTxKey keyB + kC = unTxKey keyC + leaseUntil = addTime (interTxSpace policy) now + inflightEntry = TxEntry + { txLease = TxLeased peerAddr leaseUntil + , txAttempt = 1 + , txInSubmission = False + , currentMaxInflightMultiplicity = txInflightMultiplicity policy + } + freshEntry = TxEntry + { txLease = TxClaimable now + , txAttempt = 0 + , txInSubmission = False + , currentMaxInflightMultiplicity = txInflightMultiplicity policy + } + sharedState = emptySharedTxState + { sharedTxTable = IntMap.fromList + [ (kA, inflightEntry) + , (kB, inflightEntry) + , (kC, freshEntry) + ] + , sharedTxIdToKey = Map.fromList + [ (getRawTxId (1 :: TxId), keyA) + , (getRawTxId (2 :: TxId), keyB) + , (getRawTxId (3 :: TxId), keyC) + ] + , sharedKeyToTxId = IntMap.fromList + [ (kA, 1 :: TxId), (kB, 2 :: TxId), (kC, 3 :: TxId) ] + , sharedNextTxKey = 3 + } + peerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.fromList [keyA, keyB, keyC] + , peerAvailableTxIds = IntMap.fromList + [(kA, txSize), (kB, txSize), (kC, txSize)] + , peerRequestedTxs = IntSet.fromList [kA, kB] + , peerRequestedTxBatches = StrictSeq.fromList + [ mkRequestedTxBatch [keyA] txSize + , mkRequestedTxBatch [keyB] txSize + ] + , peerRequestedTxsSize = txSize + txSize + } + peerInFlight0 = emptyPeerTxInFlight + { pifAdvertised = IntSet.fromList [kA, kB, kC] + , pifLeased = IntSet.fromList [kA, kB] + , pifAttempting = IntSet.fromList [kA, kB] + } -isTSubmittable :: ActionTrigger -> Bool -isTSubmittable TSubmittable{} = True -isTSubmittable _ = False + (action, _, _, _) = + nextPeerActionPipelined now policy peerAddr peerState0 + peerInFlight0 
sharedState + in + counterexample ("got: " ++ show action) $ + case action of + PeerRequestTxs {} -> property False + _ -> property True + where + peerAddr = 7 :: PeerAddr -isTAckable :: ActionTrigger -> Bool -isTAckable TAckable{} = True -isTAckable _ = False +-- | A peer with two unacked txids — one whose entry is leased to +-- another peer (blocked) and one that's still claimable — must skip +-- the blocked one and fetch the claimable one. +unit_nextPeerAction_skipsBlockedAvailableTxs + :: (String -> IO ()) -> Assertion +unit_nextPeerAction_skipsBlockedAvailableTxs step = do + step "Set up two advertised txs: keyA leased to another peer with the cap full, keyB claimable" + let policy0 = defaultTxDecisionPolicy + policy = policy0 { txInflightMultiplicity = 1 } + peerAddr = 7 :: PeerAddr + otherPeer = 8 :: PeerAddr + keyA = TxKey 0 + keyB = TxKey 1 + kA = unTxKey keyA + kB = unTxKey keyB + sharedState = emptySharedTxState + { sharedTxTable = IntMap.fromList + [ (kA, TxEntry + { txLease = TxLeased otherPeer (addTime 10 now) + , txAttempt = 1 + , txInSubmission = False + , currentMaxInflightMultiplicity = txInflightMultiplicity policy + }) + , (kB, TxEntry + { txLease = TxClaimable now + , txAttempt = 0 + , txInSubmission = False + , currentMaxInflightMultiplicity = txInflightMultiplicity policy + }) + ] + , sharedTxIdToKey = Map.fromList + [(getRawTxId (1 :: TxId), keyA), (getRawTxId (2 :: TxId), keyB)] + , sharedKeyToTxId = IntMap.fromList [(kA, 1 :: TxId), (kB, 2 :: TxId)] + , sharedNextTxKey = 2 + } + peerState = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.fromList [keyA, keyB] + , peerAvailableTxIds = IntMap.fromList [(kA, 10), (kB, 11)] + } + peerInFlight = emptyPeerTxInFlight + { pifAdvertised = IntSet.fromList [kA, kB] + } -isTFetchableNow :: ActionTrigger -> Bool -isTFetchableNow TFetchable{} = True -isTFetchableNow _ = False + step "Run nextPeerAction" + let (action, _, _, sharedState') = + nextPeerAction now policy peerAddr peerState 
peerInFlight sharedState + + step "The claimable tx is requested and leased; the blocked tx is skipped" + case action of + PeerRequestTxs picked -> + picked @?= [keyB] + other -> + assertFailure ("unexpected action: " ++ show other) + + case IntMap.lookup kB (sharedTxTable sharedState') of + Just entry -> case txLease entry of + TxLeased owner _ -> owner @?= peerAddr + TxClaimable _ -> assertFailure "keyB still TxClaimable" + Nothing -> assertFailure "keyB entry vanished" + +-- | When the peer's unacked queue mixes a retained-and-acked tx and a +-- buffered-but-blocked tx, only the safe prefix is acked; the blocked +-- tx remains unacked. +unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx + :: (String -> IO ()) -> Assertion +unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx step = do + step "Set up: tx 1 retained (ackable), tx 2 leased + buffered + already submitting via another peer" + let peerAddr = 7 :: PeerAddr + otherPeer = 8 :: PeerAddr + resolvedKey = TxKey 1 + blockedKey = TxKey 2 + kResolved = unTxKey resolvedKey + kBlocked = unTxKey blockedKey + blockedTx = mkTx 2 (mkSize (Positive 10)) + policy = defaultTxDecisionPolicy + blockedEntry = TxEntry + { txLease = TxLeased peerAddr (addTime 10 now) + , txAttempt = 1 + , txInSubmission = True + , currentMaxInflightMultiplicity = txInflightMultiplicity policy + } + sharedState = emptySharedTxState + { sharedTxTable = IntMap.singleton kBlocked blockedEntry + , sharedRetainedTxs = retainedSingleton kResolved (addTime 17 now) + , sharedTxIdToKey = Map.fromList + [(getRawTxId (1 :: TxId), resolvedKey), (getRawTxId (2 :: TxId), blockedKey)] + , sharedKeyToTxId = IntMap.fromList [(kResolved, 1 :: TxId), (kBlocked, 2 :: TxId)] + , sharedNextTxKey = 3 + } + peerState = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.fromList [resolvedKey, blockedKey] + , peerDownloadedTxs = IntMap.singleton kBlocked blockedTx + } + -- This peer holds the lease and is inside mempoolAddTxs; + -- another peer 
(otherPeer) is also "submitting" so submitting- + -- by-other is True from peerAddr's viewpoint. We model that by + -- otherPeer's pifSubmitting having the key. + peerInFlight = emptyPeerTxInFlight + { pifLeased = IntSet.singleton kBlocked + , pifSubmitting = IntSet.singleton kBlocked + } -isTFetchableLater :: ActionTrigger -> Bool -isTFetchableLater TFetchableLater{} = True -isTFetchableLater _ = False + step "Run nextPeerAction (with the blocked tx already submitted by 'me')" + let (action, peerState', _, _) = + nextPeerAction now policy peerAddr peerState peerInFlight sharedState + + -- The pickSubmit walk finds the blocked tx still buffered + this + -- peer is the submitter (so 'txSubmittingByOther' is False), so + -- the test should fall through to a txid request that acks the + -- retained prefix only. + step "Only the retained prefix is acked; blocked tx remains unacked" + case action of + PeerRequestTxIds _ ack req -> do + ack @?= 1 + assertBool ("expected positive txIdsToReq, got " ++ show req) (req > 0) + toList (peerUnacknowledgedTxIds peerState') @?= [blockedKey] + PeerSubmitTxs ks -> + ks @?= [blockedKey] + other -> + assertFailure ("unexpected action: " ++ show other) + -- otherPeer is referenced only to express the multi-peer setup. + _ <- pure (otherPeer :: PeerAddr) + pure () + +-- | When peer A is at its inflight-size cap and a fresh txid arrives, +-- peer A cannot claim; once peer B advertises the same key, peer B can +-- claim and download. 
+unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull + :: (String -> IO ()) -> Assertion +unit_nextPeerAction_claimsFreshTxWhenFirstAdvertiserIsFull step = do + step "Receive a fresh txid from peer A while A is already at its inflight size limit" + let (peerAState1, peerAInFlight1, sharedState1) = + handleReceivedTxIds (const False) now defaultTxDecisionPolicy + requestedToReply [(txid, txSize)] + peerAState0 emptyPeerTxInFlight sharedState0 + txLease (lookupEntryOrFail key sharedState1) @?= TxClaimable now + + step "nextPeerAction for peer A: tx remains unclaimed because A is at the size cap" + let (peerAAction, _, _, _) = + nextPeerAction now defaultTxDecisionPolicy peerA + peerAState1 peerAInFlight1 sharedState1 + case peerAAction of + PeerDoNothing _ _ -> pure () + PeerRequestTxIds {} -> pure () + other -> assertFailure ("unexpected peer A action: " ++ show other) + + step "Peer B advertises the same txid" + let (peerBState1, peerBInFlight1, sharedState2) = + handleReceivedTxIds (const False) now defaultTxDecisionPolicy + requestedToReply [(txid, txSize)] + peerBState0 emptyPeerTxInFlight sharedState1 + + step "nextPeerAction for peer B: claims and requests the fresh tx" + let (peerBAction, peerBState2, peerBInFlight2, sharedState3) = + nextPeerAction now defaultTxDecisionPolicy peerB + peerBState1 peerBInFlight1 sharedState2 + case peerBAction of + PeerRequestTxs txKeys -> do + txKeys @?= [key] + peerRequestedTxs peerBState2 @?= IntSet.singleton k + IntSet.member k (pifLeased peerBInFlight2) @?= True + case IntMap.lookup k (sharedTxTable sharedState3) of + Just entry -> case txLease entry of + TxLeased owner _ -> owner @?= peerB + _ -> assertFailure "B's claim did not produce TxLeased B" + Nothing -> assertFailure "entry vanished after B's claim" + other -> assertFailure ("unexpected peer B action: " ++ show other) + where + peerA = 7 :: PeerAddr + peerB = 8 :: PeerAddr + txid :: TxId + txid = 1 + txSize = mkSize (Positive 10) + requestedToReply :: 
NumTxIdsToReq + requestedToReply = 1 + key = TxKey 0 + k = unTxKey key + sharedState0 = emptySharedTxState + peerAState0 = emptyPeerTxLocalState + { peerRequestedTxIds = requestedToReply + , peerRequestedTxsSize = txsSizeInflightPerPeer defaultTxDecisionPolicy + } + peerBState0 = emptyPeerTxLocalState + { peerRequestedTxIds = requestedToReply } --- | Generator-time bias selector: 'ModeDisjoint' renumbers all triggers --- so each peer's txid range is unique (no cross-peer overlap), --- 'ModeShared' collapses txids into a small shared pool so cross-peer --- overlap arises with high probability. Both modes are sampled so --- single-peer-shape regressions and cross-peer-guard regressions are --- both reachable from the corpus. -data OverlapMode = ModeDisjoint | ModeShared - deriving (Eq, Show) +-- | After peer A submits the body and the mempool rejects it, the +-- entry's lease releases back to 'TxClaimable' and peer B (still +-- advertising the key) is free to claim and re-attempt on its next +-- 'nextPeerAction' pass. 
+unit_nextPeerAction_claimsRejectedTxFromOtherAdvertiser + :: (String -> IO ()) -> Assertion +unit_nextPeerAction_claimsRejectedTxFromOtherAdvertiser step = do + step "Pre-state: tx leased to peer A, A is in mempoolAddTxs (post-markSubmittingTxs), B advertises the same key" + let txid :: TxId + txid = 4 + txKeyInt :: Int + txKeyInt = 0 + txSize :: SizeInBytes + txSize = 100 + txBody :: Tx TxId + txBody = mkTx txid txSize + peerA = 1 :: PeerAddr + peerB = 2 :: PeerAddr + sharedState0 :: SharedTxState PeerAddr TxId + sharedState0 = emptySharedTxState + { sharedTxIdToKey = Map.singleton (getRawTxId txid) (TxKey txKeyInt) + , sharedKeyToTxId = IntMap.singleton txKeyInt txid + , sharedNextTxKey = txKeyInt + 1 + , sharedTxTable = IntMap.singleton txKeyInt TxEntry + { txLease = TxLeased peerA (addTime 1 now) + , txAttempt = 0 + , txInSubmission = True + , currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy + } + } + peerAState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton (TxKey txKeyInt) + , peerDownloadedTxs = IntMap.singleton txKeyInt txBody + } + peerAInFlight0 = emptyPeerTxInFlight + { pifAdvertised = IntSet.singleton txKeyInt + , pifLeased = IntSet.singleton txKeyInt + , pifSubmitting = IntSet.singleton txKeyInt + } + peerBState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton (TxKey txKeyInt) + , peerAvailableTxIds = IntMap.singleton txKeyInt txSize + } + peerBInFlight0 = emptyPeerTxInFlight + { pifAdvertised = IntSet.singleton txKeyInt + } --- | A scenario is an overlap mode plus a per-peer trigger map. With 1-3 --- peers, scenarios cover both single-peer behaviour and the cross-peer --- code paths in 'nextPeerAction' (lease contention, --- 'txSubmittingAnywhere', advertiser bookkeeping) that the single-peer --- model could not reach. 
-data TriggerScenario = - TriggerScenario OverlapMode (Map.Map PeerAddr [ActionTrigger]) - deriving (Eq, Show) + step "Peer A submits and the mempool rejects" + let (peerAState', peerAInFlight', sharedStateAfter) = + handleSubmittedTxs now defaultTxDecisionPolicy peerA + [] [TxKey txKeyInt] + peerAState0 peerAInFlight0 sharedState0 + + step "Lease released, attempt cleared, peer A's pif* cleared for the key" + case IntMap.lookup txKeyInt (sharedTxTable sharedStateAfter) of + Just entry -> do + txLease entry @?= TxClaimable now + txAttempt entry @?= 0 + txInSubmission entry @?= False + Nothing -> assertFailure "entry vanished after rejection" + peerDownloadedTxs peerAState' @?= IntMap.empty + pifLeased peerAInFlight' @?= IntSet.empty + pifSubmitting peerAInFlight' @?= IntSet.empty + pifAdvertised peerAInFlight' @?= IntSet.empty + + step "Peer B's nextPeerAction now claims the released tx" + let (peerBAction, peerBState', peerBInFlight', sharedStateFinal) = + nextPeerAction now defaultTxDecisionPolicy peerB + peerBState0 peerBInFlight0 sharedStateAfter + peerBAction @?= PeerRequestTxs [TxKey txKeyInt] + peerRequestedTxs peerBState' @?= IntSet.singleton txKeyInt + IntSet.member txKeyInt (pifLeased peerBInFlight') @?= True + case IntMap.lookup txKeyInt (sharedTxTable sharedStateFinal) of + Just entry -> case txLease entry of + TxLeased owner _ -> owner @?= peerB + _ -> assertFailure "B's claim did not produce TxLeased B" + Nothing -> assertFailure "entry vanished after B's claim" + +-- | A peer with a non-zero score waits its score-derived delay before +-- claiming a TxClaimable entry. Set up the entry to become claimable +-- exactly @peerScore / 20000@ seconds before @now@; the peer should +-- be allowed to claim at @now@. 
+unit_nextPeerAction_claimsAtScoreDelayThreshold + :: (String -> IO ()) -> Assertion +unit_nextPeerAction_claimsAtScoreDelayThreshold step = do + step "Set up: claimableAt = now - 1ms, peer score = 20 (1ms claim delay), so claim at now is just allowed" + let peerAddr = 7 :: PeerAddr + txid :: TxId + txid = 1 + txSize = mkSize (Positive 10) + key = TxKey 0 + k = unTxKey key + claimableAt = Time 99.999 -- now is Time 100 + sharedState = emptySharedTxState + { sharedTxIdToKey = Map.singleton (getRawTxId txid) key + , sharedKeyToTxId = IntMap.singleton k txid + , sharedNextTxKey = 1 + , sharedTxTable = IntMap.singleton k TxEntry + { txLease = TxClaimable claimableAt + , txAttempt = 0 + , txInSubmission = False + , currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy + } + } + peerState = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton key + , peerAvailableTxIds = IntMap.singleton k txSize + , peerScore = PeerScore 20 now + } + peerInFlight = emptyPeerTxInFlight + { pifAdvertised = IntSet.singleton k + } --- | Per-peer trigger-list generator. Singleton is common; empty and --- large lists are explicitly represented; the per-element generator --- biases toward uniform-of-one-class so all-fetchable / all-ackable --- scenarios arise often enough to exercise their code paths. 
-genPerPeerTriggers :: Gen [ActionTrigger] -genPerPeerTriggers = do - size <- frequency - [ (2, pure 1) -- singleton - , (1, pure 0) -- empty - , (3, choose (2, 10)) -- small - , (1, choose (11, 100)) -- larger - ] - genElem <- oneof - [ pure arbitrary - , pure (TFetchable <$> arbitrary <*> arbitrary) - , pure (TSubmittable <$> arbitrary <*> arbitrary) - , pure (TAckable <$> arbitrary) - , pure (TFetchableLater <$> arbitrary <*> arbitrary <*> arbitrary) - ] - vectorOf size genElem + step "Run nextPeerAction" + let (action, peerState', _, sharedState') = + nextPeerAction now defaultTxDecisionPolicy peerAddr + peerState peerInFlight sharedState --- | Size-first shrink for a per-peer trigger list: halve, drop one, --- element-shrink. Same behaviour as the original 'shrink' for --- 'TriggerScenario' before the multi-peer refactor. -shrinkTriggerList :: [ActionTrigger] -> [[ActionTrigger]] -shrinkTriggerList ts = - let n = length ts in - [ take (n `div` 2) ts | n >= 2 ] - ++ [ drop (n `div` 2) ts | n >= 2 ] - ++ [ take i ts ++ drop (i + 1) ts | i <- [0 .. n - 1] ] - ++ [ take i ts ++ t' : drop (i + 1) ts - | i <- [0 .. n - 1] - , t' <- shrink (ts !! i) - ] + step "Tx becomes claimable once the peerScore / 20000 ms threshold has elapsed" + case action of + PeerRequestTxs txKeys -> do + txKeys @?= [key] + peerRequestedTxs peerState' @?= IntSet.singleton k + case IntMap.lookup k (sharedTxTable sharedState') of + Just entry -> case txLease entry of + TxLeased owner _ -> owner @?= peerAddr + _ -> assertFailure "did not lease to peerAddr" + Nothing -> assertFailure "entry vanished after claim" + other -> assertFailure ("unexpected action: " ++ show other) + +-- | A peer that has buffered a tx blocked by another peer's submission +-- should still request body work for OTHER advertised txs that are +-- claimable. Without this, a single stuck submission would starve the +-- whole peer. 
+unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx + :: (String -> IO ()) -> Assertion +unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx step = do + step "Set up: blocked tx leased to me + buffered, but another peer is submitting it; second tx is claimable" + let peerAddr = 7 :: PeerAddr + submittingPeer = 8 :: PeerAddr + blockedTxid :: TxId + blockedTxid = 1 + claimableTxid :: TxId + claimableTxid = 2 + blockedSize = mkSize (Positive 10) + claimableSize = mkSize (Positive 11) + blockedKey = TxKey 1 + claimableKey = TxKey 2 + kBlocked = unTxKey blockedKey + kClaimable = unTxKey claimableKey + blockedTx = mkTx blockedTxid blockedSize + blockedEntry = TxEntry + { txLease = TxLeased peerAddr (addTime 10 now) + , txAttempt = 1 + , txInSubmission = True -- submittingPeer is in mempoolAddTxs + , currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy + } + sharedState = emptySharedTxState + { sharedTxTable = IntMap.fromList + [ (kBlocked, blockedEntry) + , (kClaimable, TxEntry + { txLease = TxClaimable now + , txAttempt = 0 + , txInSubmission = False + , currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy + }) + ] + , sharedTxIdToKey = Map.fromList + [ (getRawTxId blockedTxid, blockedKey) + , (getRawTxId claimableTxid, claimableKey) + ] + , sharedKeyToTxId = IntMap.fromList + [ (kBlocked, blockedTxid), (kClaimable, claimableTxid) ] + , sharedNextTxKey = 3 + } + peerState = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.fromList [blockedKey, claimableKey] + , peerAvailableTxIds = IntMap.fromList + [ (kBlocked, blockedSize), (kClaimable, claimableSize) ] + , peerDownloadedTxs = IntMap.singleton kBlocked blockedTx + } + -- This peer holds the lease on the blocked tx but didn't + -- submit it themselves; the submitting peer is someone else. 
+ peerInFlight = emptyPeerTxInFlight + { pifAdvertised = IntSet.fromList [kBlocked, kClaimable] + , pifLeased = IntSet.singleton kBlocked + , pifAttempting = IntSet.singleton kBlocked + } --- | Replace every 'ActionTrigger's txid so each peer's range is unique --- across peers. Guarantees zero cross-peer overlap regardless of the raw --- arbitrary txids. -renumberDisjoint :: [[ActionTrigger]] -> [[ActionTrigger]] -renumberDisjoint = snd . mapAccumL renumberOne 1 - where - renumberOne nextId triggers = - let n = length triggers in - (nextId + n, zipWith setTxid triggers [nextId .. nextId + n - 1]) + step "Run nextPeerAction" + let (action, peerState', _, sharedState') = + nextPeerAction now defaultTxDecisionPolicy peerAddr + peerState peerInFlight sharedState + + step "Blocked tx stays buffered while the claimable tx is requested" + case action of + PeerRequestTxs picked -> do + picked @?= [claimableKey] + peerUnacknowledgedTxIds peerState' @?= peerUnacknowledgedTxIds peerState + peerRequestedTxs peerState' @?= IntSet.singleton kClaimable + peerDownloadedTxs peerState' @?= peerDownloadedTxs peerState + case IntMap.lookup kClaimable (sharedTxTable sharedState') of + Just entry -> case txLease entry of + TxLeased owner _ -> owner @?= peerAddr + _ -> assertFailure "claimable not leased to peerAddr" + Nothing -> assertFailure "claimable entry vanished" + other -> + assertFailure ("unexpected action: " ++ show other) + -- submittingPeer is referenced only for clarity of the test setup. + _ <- pure (submittingPeer :: PeerAddr) + pure () + +-- | A peer that is not the leaseholder and has not buffered the body +-- waits with the txid unacked until the tx resolves out of the active +-- table. After it resolves into 'sharedRetainedTxs' the same +-- 'nextPeerAction' call now ack-ifies the txid. 
+prop_nextPeerAction_nonOwnerWaitsUntilResolved + :: ArbTxDecisionPolicy + -> TxId + -> Property +prop_nextPeerAction_nonOwnerWaitsUntilResolved (ArbTxDecisionPolicy policy) + txid0 = + let + txid = abs txid0 + 1 + key = TxKey 0 + k = unTxKey key + -- Unresolved: entry sits TxClaimable; another peer "has the body" + -- but in the new model that's reflected by txAttempt > 0 on the + -- entry; this peer's pif* are empty. Peer's peerAvailableTxIds is + -- empty so it can't claim either. + unresolvedSharedState :: SharedTxState PeerAddr TxId + unresolvedSharedState = emptySharedTxState + { sharedTxTable = IntMap.singleton k TxEntry + { txLease = TxClaimable now + , txAttempt = 0 + , txInSubmission = False + , currentMaxInflightMultiplicity = txInflightMultiplicity policy + } + , sharedTxIdToKey = Map.singleton (getRawTxId txid) key + , sharedKeyToTxId = IntMap.singleton k txid + , sharedNextTxKey = 1 + } + resolvedSharedState = unresolvedSharedState + { sharedTxTable = IntMap.empty + , sharedRetainedTxs = retainedSingleton k (addTime 17 now) + } + peerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton key + , peerRequestedTxIds = 0 + } + peerInFlight0 = emptyPeerTxInFlight + { pifAdvertised = IntSet.singleton k + } --- | Replace every 'ActionTrigger's txid with a draw from a shared pool --- of size 'poolSize'. Within-peer collisions get deduped by the --- subsequent 'normaliseTriggers'; cross-peer collisions become the --- overlap that the multi-peer test is designed to exercise. 
-collapseToPool :: Int -> [[ActionTrigger]] -> Gen [[ActionTrigger]] -collapseToPool poolSize = traverse (traverse remap) + (unresolvedAction, unresolvedPeerState', _, _) = + nextPeerAction now policy peerAddr peerState0 peerInFlight0 + unresolvedSharedState + (resolvedAction, resolvedPeerState', _, _) = + nextPeerAction now policy peerAddr peerState0 peerInFlight0 + resolvedSharedState + in + conjoin + [ counterexample "unresolved: must not ack the unresolved txid" $ + case unresolvedAction of + PeerDoNothing _ _ -> + peerUnacknowledgedTxIds unresolvedPeerState' + === peerUnacknowledgedTxIds peerState0 + PeerRequestTxIds _ ack _ -> + counterexample ("ack should be 0, got " ++ show ack) + (ack === 0) + other -> + counterexample ("unexpected unresolved action: " ++ show other) + (property False) + , counterexample "resolved: must ack the now-retained txid" $ + case resolvedAction of + PeerRequestTxIds _ ack _ -> + conjoin + [ ack === 1 + , peerUnacknowledgedTxIds resolvedPeerState' === StrictSeq.empty + ] + other -> + counterexample ("unexpected resolved action: " ++ show other) + (property False) + ] where - remap trig = do - newId <- chooseInt (1, poolSize) - pure (setTxid trig newId) + peerAddr = 1 :: PeerAddr -setTxid :: ActionTrigger -> TxId -> ActionTrigger -setTxid (TSubmittable _ s) t = TSubmittable t s -setTxid (TFetchable _ s) t = TFetchable t s -setTxid (TAckable _) t = TAckable t -setTxid (TFetchableLater d _ s) t = TFetchableLater d t s +-- | Roles for 'prop_nextPeerAction_claimsClaimableTx': 'Good' has no +-- score and can claim immediately; 'Bad' has a non-zero score that +-- pushes its claim past 'now'; 'Confounder' has no relevant local +-- state and is expected to do nothing on any 'nextPeerAction' call. 
+data PeerRole = Good | Bad | Confounder + deriving (Eq, Show) -instance Arbitrary TriggerScenario where - arbitrary = do - -- Peer-count distribution: weighted toward 1-2 peers so the simpler - -- single-peer cases remain well-represented while 3-peer scenarios - -- still arise often enough to surface multi-claim contention. - nPeers <- frequency - [ (2, pure 1) - , (2, pure 2) - , (1, pure 3) - ] - perPeer <- vectorOf nPeers genPerPeerTriggers - -- Mode-frequency 2:3 ensures both modes are well-represented; - -- 'ModeShared' is weighted slightly higher because cross-peer - -- overlap is the harder-to-reach coverage class. - mode <- frequency - [ (2, pure ModeDisjoint) - , (3, pure ModeShared) - ] - remapped <- case mode of - ModeDisjoint -> pure (renumberDisjoint perPeer) - ModeShared -> - let totalT = sum (map length perPeer) - poolSz = max 1 (totalT `div` 2) in - collapseToPool poolSz perPeer - pure (TriggerScenario mode - (Map.fromList (zip [1 .. nPeers] remapped))) - -- Shrink keeps the mode and shrinks structure: drop a peer first, - -- then shrink one peer's list. Keeping at least one peer preserves - -- well-formedness; mode is locked at generation so shrinks never - -- migrate between disjoint and shared. - shrink (TriggerScenario mode m) = - [ TriggerScenario mode (Map.delete p m) - | Map.size m > 1 - , p <- Map.keys m +-- | Whether the lease starts as 'TxClaimable claimableAt' (a fresh +-- claimable lease) or as 'TxLeased oldOwner claimableAt' with +-- @claimableAt < now@ (a stale lease whose deadline already passed, +-- so any other advertiser whose 'peerClaimDelay' permits can claim). +data LeaseStart = ClaimableLease | ExpiredLease + deriving (Eq, Show) + +instance Arbitrary LeaseStart where + arbitrary = elements [ClaimableLease, ExpiredLease] + +-- | A scheduling order for the three peers. Generated as a permutation +-- of the three roles. 
+newtype PeerOrder = PeerOrder [PeerRole] + deriving Show + +instance Arbitrary PeerOrder where + arbitrary = PeerOrder <$> shuffle [Good, Bad, Confounder] + +-- | Drives 'nextPeerAction' for three peers ('Good', 'Bad', +-- 'Confounder') in a generator-chosen order. Asserts: +-- +-- * 'Good' (no score) claims the tx and ends up the leaseholder. +-- * 'Bad' (non-zero score) yields 'PeerDoNothing' with a wake delay +-- equal to the score-derived delay (offset by whether 'Bad' ran +-- before or after 'Good's claim). +-- * 'Confounder' (no relevant state) yields 'PeerDoNothing' with no +-- wake. +-- * The combined invariant holds for the full @(peerLocal, +-- peerInFlight)@ snapshot of all three peers after each action. +prop_nextPeerAction_claimsClaimableTx + :: ArbTxDecisionPolicy + -> Positive Int + -> Positive Int + -> Positive Int + -> TxId + -> Positive Int + -> Positive Int + -> Positive Int + -> PeerOrder + -> LeaseStart + -> Property +prop_nextPeerAction_claimsClaimableTx + (ArbTxDecisionPolicy arbPolicy) + (Positive good0) (Positive bad0) (Positive conf0) + txid0 txSize0 (Positive badScore0) (Positive tDecay0) (PeerOrder order) + leaseStart = + tabulate "order" [show order] + . 
tabulate "lease" [show leaseStart] + $ conjoin + [ counterexample ("Good must claim: " ++ show (lookupAction Good)) $ + case lookupAction Good of + Just (PeerRequestTxs txKeys, _, _) -> txKeys === [key] + _ -> property False + , counterexample ("Bad must yield with the score-delay derived wake: " + ++ show (lookupAction Bad)) $ + case lookupAction Bad of + Just (PeerDoNothing _ (Just delay), _, _) -> + let diff = abs (delay - expectedBadDelay) in + counterexample + ("delay = " ++ show delay + ++ ", expected = " ++ show expectedBadDelay + ++ ", diff = " ++ show diff) + (property (diff < 1e-9)) + other -> + counterexample ("got: " ++ show other) (property False) + , counterexample ("Confounder must do nothing with no scheduled wake: " + ++ show (lookupAction Confounder)) $ + case lookupAction Confounder of + Just (PeerDoNothing _ Nothing, _, _) -> property True + _ -> property False + , counterexample "Lease must end up at Good" $ + case IntMap.lookup k (sharedTxTable sharedStateFinal) of + Just entry -> case txLease entry of + TxLeased owner _ -> owner === goodPeer + _ -> property False + Nothing -> property False + ] + where + policy = arbPolicy + { scoreMax = max 200 (scoreMax arbPolicy) + , scoreRate = max 0.01 (min 1.0 (scoreRate arbPolicy)) + } + + goodPeer = good0 + badPeer = bad0 + 1000 + confPeer = conf0 + 2000 + txid = abs txid0 + 1 + txSize = mkSize txSize0 + key = TxKey 0 + k = unTxKey key + + claimableAt = Time 99.999 + tDecaySec :: Double + tDecaySec = fromIntegral (1 + (tDecay0 - 1) `mod` 10 :: Int) + decayAmount :: Double + decayAmount = tDecaySec * scoreRate policy + decayedBadScore :: Double + decayedBadScore = fromIntegral (21 + (badScore0 - 1) `mod` 80 :: Int) + badInitialScore :: Double + badInitialScore = decayedBadScore + decayAmount + badPeerScore = PeerScore + { peerScoreValue = badInitialScore + , peerScoreTs = addTime (negate (realToFrac tDecaySec)) now + } + + badRunsBeforeGood = case (elemIndex Bad order, elemIndex Good order) of + (Just 
bi, Just gi) -> bi < gi + _ -> False + badClaimDelay :: DiffTime + badClaimDelay = realToFrac (decayedBadScore / 20000) + expectedBadDelay :: DiffTime + expectedBadDelay + | badRunsBeforeGood = badClaimDelay - diffTime now claimableAt + | otherwise = badClaimDelay + interTxSpace policy + + -- Stale-lease holder; only present in shared state when + -- 'leaseStart = ExpiredLease'. 'nextPeerAction' is never called + -- for this peer; it exists only to set the @TxLeased@ owner with + -- @leaseUntil = claimableAt < now@. + oldOwner = good0 + bad0 + conf0 + 3000 + + sharedState0 = emptySharedTxState + { sharedTxIdToKey = Map.singleton (getRawTxId txid) key + , sharedKeyToTxId = IntMap.singleton k txid + , sharedNextTxKey = 1 + , sharedTxTable = IntMap.singleton k TxEntry + { txLease = case leaseStart of + ClaimableLease -> TxClaimable claimableAt + ExpiredLease -> TxLeased oldOwner claimableAt + , txAttempt = 0 + , txInSubmission = False + , currentMaxInflightMultiplicity = txInflightMultiplicity policy + } + } + + goodPeerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton key + , peerAvailableTxIds = IntMap.singleton k txSize + } + goodInFlight0 = emptyPeerTxInFlight { pifAdvertised = IntSet.singleton k } + + badPeerState0 = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.singleton key + , peerAvailableTxIds = IntMap.singleton k txSize + , peerScore = badPeerScore + } + badInFlight0 = emptyPeerTxInFlight { pifAdvertised = IntSet.singleton k } + + confPeerState0 = emptyPeerTxLocalState + { peerRequestedTxIds = maxNumTxIdsToRequest policy + } + confInFlight0 = emptyPeerTxInFlight + + roleSetup Good = (goodPeer, goodPeerState0, goodInFlight0) + roleSetup Bad = (badPeer, badPeerState0, badInFlight0) + roleSetup Confounder = (confPeer, confPeerState0, confInFlight0) + + runOne :: (SharedTxState PeerAddr TxId, + [(PeerRole, PeerAction, PeerTxLocalState (Tx TxId), PeerTxInFlight)]) + -> PeerRole + -> (SharedTxState PeerAddr TxId, + 
[(PeerRole, PeerAction, PeerTxLocalState (Tx TxId), PeerTxInFlight)]) + runOne (ss, acc) role = + let (peer, ps0, pif0) = roleSetup role + (action, ps', pif', ss') = + nextPeerAction now policy peer ps0 pif0 ss + in (ss', (role, action, ps', pif') : acc) + + (sharedStateFinal, results) = foldl' runOne (sharedState0, []) order + + lookupAction :: PeerRole + -> Maybe (PeerAction, PeerTxLocalState (Tx TxId), PeerTxInFlight) + lookupAction role = + case [ (a, ps', pif') | (r, a, ps', pif') <- results, r == role ] of + [] -> Nothing + (x : _) -> Just x + +-- +-- TriggerScenario: multi-peer scheduler exercise +-- +-- Each 'ActionTrigger' on a peer's list describes one action that +-- should be choosable at the moment the test calls 'nextPeerAction'. +-- 'buildTriggerState' turns the per-peer list into a consistent +-- ('PeerTxLocalState', 'PeerTxInFlight', 'SharedTxState') triple. + +data ActionTrigger + = TSubmittable TxId (Positive Int) + -- ^ peer holds the lease, body buffered locally, ready to submit. + | TFetchable TxId (Positive Int) + -- ^ peer advertises the txid, claimable now. + | TAckable TxId + -- ^ in 'sharedRetainedTxs' + peer's unacked queue (resolved already). + | TFetchableLater (Positive Int) TxId (Positive Int) + -- ^ delay-in-seconds + txid + size: claimable only after the loop + -- has advanced 'time' by at least the delay. 
+ deriving (Eq, Show) + +instance Arbitrary ActionTrigger where + arbitrary = oneof + [ TSubmittable <$> arbitrary <*> arbitrary + , TFetchable <$> arbitrary <*> arbitrary + , TAckable <$> arbitrary + , TFetchableLater <$> arbitrary <*> arbitrary <*> arbitrary + ] + shrink (TSubmittable t s) = + [ TFetchable t s, TAckable t ] + ++ [ TSubmittable t' s | t' <- take 1 (shrink t) ] + shrink (TFetchable t s) = + TAckable t : + [ TFetchable t' s | t' <- take 1 (shrink t) ] + shrink (TAckable t) = + [ TAckable t' | t' <- take 1 (shrink t) ] + shrink (TFetchableLater d t s) = + [ TFetchable t s, TAckable t ] + ++ [ TFetchableLater d' t s | d' <- take 1 (shrink d) ] + +triggerTxid :: ActionTrigger -> TxId +triggerTxid (TSubmittable t _) = t +triggerTxid (TFetchable t _) = t +triggerTxid (TAckable t) = t +triggerTxid (TFetchableLater _ t _) = t + +isTSubmittable :: ActionTrigger -> Bool +isTSubmittable TSubmittable{} = True +isTSubmittable _ = False + +isTAckable :: ActionTrigger -> Bool +isTAckable TAckable{} = True +isTAckable _ = False + +isTFetchableNow :: ActionTrigger -> Bool +isTFetchableNow TFetchable{} = True +isTFetchableNow _ = False + +isTFetchableLater :: ActionTrigger -> Bool +isTFetchableLater TFetchableLater{} = True +isTFetchableLater _ = False + +setTxid :: ActionTrigger -> TxId -> ActionTrigger +setTxid (TSubmittable _ s) t = TSubmittable t s +setTxid (TFetchable _ s) t = TFetchable t s +setTxid (TAckable _) t = TAckable t +setTxid (TFetchableLater d _ s) t = TFetchableLater d t s + +-- | Generator-time bias selector: 'ModeDisjoint' renumbers all +-- triggers so each peer's txid range is unique (no cross-peer overlap), +-- 'ModeShared' collapses txids into a small shared pool so cross-peer +-- overlap arises with high probability. +data OverlapMode = ModeDisjoint | ModeShared + deriving (Eq, Show) + +-- | A scenario is an overlap mode plus a per-peer trigger map. 
+data TriggerScenario = + TriggerScenario OverlapMode (Map.Map PeerAddr [ActionTrigger]) + deriving (Eq, Show) + +genPerPeerTriggers :: Gen [ActionTrigger] +genPerPeerTriggers = do + size <- frequency + [ (2, pure 1) + , (1, pure 0) + , (3, choose (2, 10)) + , (1, choose (11, 100)) + ] + genElem <- oneof + [ pure arbitrary + , pure (TFetchable <$> arbitrary <*> arbitrary) + , pure (TSubmittable <$> arbitrary <*> arbitrary) + , pure (TAckable <$> arbitrary) + , pure (TFetchableLater <$> arbitrary <*> arbitrary <*> arbitrary) + ] + vectorOf size genElem + +shrinkTriggerList :: [ActionTrigger] -> [[ActionTrigger]] +shrinkTriggerList ts = + let n = length ts in + [ take (n `div` 2) ts | n >= 2 ] + ++ [ drop (n `div` 2) ts | n >= 2 ] + ++ [ take i ts ++ drop (i + 1) ts | i <- [0 .. n - 1] ] + ++ [ take i ts ++ t' : drop (i + 1) ts + | i <- [0 .. n - 1] + , t' <- shrink (ts !! i) + ] + +renumberDisjoint :: [[ActionTrigger]] -> [[ActionTrigger]] +renumberDisjoint = snd . mapAccumL renumberOne 1 + where + renumberOne nextId triggers = + let n = length triggers in + (nextId + n, zipWith setTxid triggers [nextId .. nextId + n - 1]) + +collapseToPool :: Int -> [[ActionTrigger]] -> Gen [[ActionTrigger]] +collapseToPool poolSize = traverse (traverse remap) + where + remap trig = do + newId <- chooseInt (1, poolSize) + pure (setTxid trig newId) + +instance Arbitrary TriggerScenario where + arbitrary = do + nPeers <- frequency + [ (2, pure 1) + , (2, pure 2) + , (1, pure 3) + ] + perPeer <- vectorOf nPeers genPerPeerTriggers + mode <- frequency + [ (2, pure ModeDisjoint) + , (3, pure ModeShared) + ] + remapped <- case mode of + ModeDisjoint -> pure (renumberDisjoint perPeer) + ModeShared -> + let totalT = sum (map length perPeer) + poolSz = max 1 (totalT `div` 2) in + collapseToPool poolSz perPeer + pure (TriggerScenario mode + (Map.fromList (zip [1 .. 
nPeers] remapped))) + shrink (TriggerScenario mode m) = + [ TriggerScenario mode (Map.delete p m) + | Map.size m > 1 + , p <- Map.keys m ] ++ [ TriggerScenario mode (Map.insert p ts' m) | (p, ts) <- Map.toList m @@ -1723,8 +2236,6 @@ instance Arbitrary TriggerScenario where ] -- | Strongest trigger category seen across all peers for a given txid. --- Drives both the merged 'TxEntry' shape and the global expected-action --- assertion in 'prop_nextPeerAction_processesAllTriggers'. data TxCategory = CatSubmit | CatFetch | CatAck deriving (Eq, Show) @@ -1739,30 +2250,15 @@ hasActiveEntry TAckable{} = False hasActiveEntry _ = True -- | Build a consistent multi-peer state from a per-peer trigger map. --- Each peer's list must be normalised; triggers are globally re-keyed so --- a txid mentioned by multiple peers gets a single shared 'TxKey' and --- their 'TxEntry' merges advertiser count, attempts, and lease. --- --- For a txid: --- * if any peer has 'TSubmittable', the lowest-numbered such peer holds --- the lease ('TxLeased') and every TSubmittable peer's attempt is --- 'TxBuffered'; --- * else if any peer has 'TFetchableLater', the lease is 'TxClaimable' --- delayed by the first such trigger's delay; --- * else if any peer has 'TFetchable', the lease is 'TxClaimable' now; --- * else (only 'TAckable' across all peers) the txid lives in --- 'sharedRetainedTxs' and has no active-table entry. +-- The state must already be normalised by 'normaliseScenario'. buildTriggerState :: TxDecisionPolicy -> Map.Map PeerAddr [ActionTrigger] - -> ( Map.Map PeerAddr (PeerTxLocalState (Tx TxId)) + -> ( Map.Map PeerAddr (PeerTxLocalState (Tx TxId), PeerTxInFlight) , SharedTxState PeerAddr TxId ) buildTriggerState policy perPeer = (peerStates, sharedState0) where - -- Global txid order: peer-ascending, then per-peer trigger order; - -- first appearance wins. Stable so the same scenario always builds - -- the same state. 
allTxids :: [TxId] allTxids = nub [ triggerTxid t @@ -1792,12 +2288,14 @@ buildTriggerState policy perPeer = laterDelay txid = listToMaybe [ d | (_, TFetchableLater (Positive d) _ _) <- triggersFor txid ] + -- An entry's 'txAttempt' is exactly the number of TSubmittable + -- triggers for the txid; with 'dedupeAcrossPeers' that's at most 1. + -- All fetchable peers contribute via their per-peer 'pifAdvertised' + -- to keep the entry alive against the orphan sweep. mkEntry :: TxId -> TxEntry PeerAddr mkEntry txid = let trigs = triggersFor txid - activeCount = length (filter (hasActiveEntry . snd) trigs) - attempts = Map.fromList - [ (p, TxBuffered) | (p, t) <- trigs, isTSubmittable t ] + attemptCount = length [() | (_, t) <- trigs, isTSubmittable t] lease = case submittingPeer txid of Just p -> TxLeased p (addTime 10 now) Nothing -> case laterDelay txid of @@ -1805,8 +2303,8 @@ buildTriggerState policy perPeer = Nothing -> TxClaimable now in TxEntry { txLease = lease - , txAdvertiserCount = activeCount - , txAttempts = attempts + , txAttempt = attemptCount + , txInSubmission = False , currentMaxInflightMultiplicity = txInflightMultiplicity policy } @@ -1818,32 +2316,37 @@ buildTriggerState policy perPeer = retainedUntil = addTime 600 now retainedEntries = [ (txidKey txid, retainedUntil) | txid <- retainedTxids ] - mkPeerLocal :: PeerAddr -> [ActionTrigger] -> PeerTxLocalState (Tx TxId) - mkPeerLocal _ ts = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.fromList - [ TxKey (txidKey (triggerTxid t)) | t <- ts ] - , peerDownloadedTxs = IntMap.fromList - [ (txidKey t', mkTx t' (mkSize s)) - | TSubmittable t' s <- ts - ] - , peerAvailableTxIds = IntMap.fromList $ - [ (txidKey t', mkSize s) | TFetchable t' s <- ts ] - ++ [ (txidKey t', mkSize s) | TFetchableLater _ t' s <- ts ] - } - - peerStates = Map.mapWithKey mkPeerLocal perPeer - - mkSharedPeer :: PeerAddr -> [ActionTrigger] -> SharedPeerState - mkSharedPeer _ ts = - let advKeys = [ TxKey (txidKey 
(triggerTxid t)) - | t <- ts, hasActiveEntry t ] in - withAdvertisedTxKeys advKeys (mkSharedPeerState) + mkPeerState :: PeerAddr -> [ActionTrigger] + -> (PeerTxLocalState (Tx TxId), PeerTxInFlight) + mkPeerState peeraddr ts = + let peerLocal = emptyPeerTxLocalState + { peerUnacknowledgedTxIds = StrictSeq.fromList + [ TxKey (txidKey (triggerTxid t)) | t <- ts ] + , peerDownloadedTxs = IntMap.fromList + [ (txidKey t', mkTx t' (mkSize s)) + | TSubmittable t' s <- ts + ] + , peerAvailableTxIds = IntMap.fromList $ + [ (txidKey t', mkSize s) | TFetchable t' s <- ts ] + ++ [ (txidKey t', mkSize s) | TFetchableLater _ t' s <- ts ] + } + advertised = IntSet.fromList + [ txidKey (triggerTxid t) | t <- ts, hasActiveEntry t ] + leasedHere = IntSet.fromList + [ txidKey t' | TSubmittable t' _ <- ts + , submittingPeer t' == Just peeraddr ] + attemptingHere = leasedHere + peerInFlight = emptyPeerTxInFlight + { pifAdvertised = advertised + , pifLeased = leasedHere + , pifAttempting = attemptingHere + } + in (peerLocal, peerInFlight) - sharedPeers' = Map.mapWithKey mkSharedPeer perPeer + peerStates = Map.mapWithKey mkPeerState perPeer sharedState0 = emptySharedTxState - { sharedPeers = sharedPeers' - , sharedTxTable = IntMap.fromList + { sharedTxTable = IntMap.fromList [ (txidKey txid, mkEntry txid) | txid <- activeTxids ] , sharedTxIdToKey = Map.fromList [ (getRawTxId txid, TxKey (txidKey txid)) | txid <- allTxids ] @@ -1853,14 +2356,8 @@ buildTriggerState policy perPeer = , sharedRetainedTxs = retainedFromList retainedEntries } --- | Normalise a per-peer trigger list: shift txids to >= 1, dedupe by --- the post-shift txid, and reorder so ackables come first, then --- submittables, then fetchables. 
The order matters because --- 'pickSubmitAction' walks 'peerUnacknowledgedTxIds' in order and stops --- at the first non-submittable-but-known tx, and 'pickRequestTxIdsAction' --- takes the longest ackable prefix; with ackables first every ackable is --- reached, and the submit walk skips ackables (no shared-table entry) to --- pick up submittables before stopping at the first fetchable. +-- | Per-peer normalise: shift txids to >= 1, dedupe, reorder so +-- ackables come first, then submittables, then fetchables. normaliseTriggers :: [ActionTrigger] -> [ActionTrigger] normaliseTriggers = orderTriggers @@ -1878,10 +2375,7 @@ normaliseTriggers = ++ filter isTFetchableLater ts -- | Across peers, ensure each txid has 'TSubmittable' from at most one --- peer. The lowest-numbered peer keeps the 'TSubmittable'; others get --- demoted to 'TFetchable' with the same size. This avoids invariant- --- violating initial states where two peers both hold the body for the --- same tx with one as the lease-holder. +-- peer (the lowest-numbered). dedupeAcrossPeers :: Map.Map PeerAddr [ActionTrigger] -> Map.Map PeerAddr [ActionTrigger] dedupeAcrossPeers m = Map.mapWithKey (map . demote) m @@ -1893,36 +2387,64 @@ dedupeAcrossPeers m = Map.mapWithKey (map . demote) m | Map.lookup t primarySubmitter /= Just p = TFetchable t s demote _ trig = trig --- | Per-peer normalise plus cross-peer dedupe, in that order. normaliseScenario :: Map.Map PeerAddr [ActionTrigger] -> Map.Map PeerAddr [ActionTrigger] normaliseScenario = dedupeAcrossPeers . Map.map normaliseTriggers --- | Drives 'nextPeerAction' for every peer in the scenario, advancing --- the earliest-wake peer at each step. After any action that mutates --- shared state (Submit / RequestTxs / RequestTxIds), other peers are --- reactivated so they can re-evaluate against the new shared state. --- Asserts: --- --- 1. The loop terminates within the iteration budget. --- 2. 
Every txid whose strongest cross-peer trigger category is --- 'CatSubmit' appears in the union of submitted keys. --- 3. Every txid whose category is 'CatFetch' appears in the union of --- requested keys. --- 4. Every txid whose category is 'CatAck' appears in the union of --- acked keys. --- 5. 'combinedStateInvariant' holds on the initial state for every --- peer and after the acting peer's update at every step. -prop_nextPeerAction_processesAllTriggers - :: ArbTxDecisionPolicy - -> TriggerScenario - -> Property -prop_nextPeerAction_processesAllTriggers - (ArbTxDecisionPolicy arbPolicy) (TriggerScenario mode rawPerPeer) = - tabulate "trigger count" [bucket totalTriggers] - . tabulate "peer count" [show nPeers] - . tabulate "iterations" [bucket iterations] - . tabulate "shared txids" [bucket sharedTxidCount] +-- | Policy used by the 'TriggerScenario' meta-tests. +metaPolicy :: TxDecisionPolicy +metaPolicy = defaultTxDecisionPolicy { txInflightMultiplicity = 2 } + +prop_TriggerScenario_validInitialState :: TriggerScenario -> Property +prop_TriggerScenario_validInitialState (TriggerScenario _ rawPerPeer) = + let perPeer = normaliseScenario rawPerPeer + (peerStates, ss0) = buildTriggerState metaPolicy perPeer in + combinedStateInvariant metaPolicy StrongInvariant peerStates ss0 + +prop_TriggerScenario_shrinkPreservesValidity :: TriggerScenario -> Property +prop_TriggerScenario_shrinkPreservesValidity ts = + conjoin + [ prop_TriggerScenario_validInitialState ts' + | ts' <- shrink ts + ] + +prop_TriggerScenario_shrinkSmaller :: TriggerScenario -> Property +prop_TriggerScenario_shrinkSmaller ts@(TriggerScenario _ rawM) = + let n = sum (map length (Map.elems rawM)) in + conjoin + [ counterexample ("shrink grew the trigger count: " ++ show ts') + $ sum (map length (Map.elems rawM')) <= n + | ts'@(TriggerScenario _ rawM') <- shrink ts + ] + +prop_TriggerScenario_shrinkExcludesOriginal :: TriggerScenario -> Property +prop_TriggerScenario_shrinkExcludesOriginal ts = + 
counterexample "shrink contains the original value" + $ property (ts `notElem` shrink ts) + +-- | Drives 'nextPeerAction' for every peer in the scenario, advancing +-- the earliest-wake peer at each step. Asserts: +-- +-- 1. The loop terminates within the iteration budget. +-- 2. Every txid whose strongest cross-peer trigger category is +-- 'CatSubmit' or 'CatFetch' appears in the union of submitted keys +-- (since fetched bodies are deferred-delivered and then submitted). +-- 3. Every txid whose category is 'CatFetch' appears in the union of +-- requested keys. +-- 4. Every txid whose category is 'CatAck' appears in the union of +-- acked keys. +-- 5. 'combinedStateInvariant' holds on the initial state for every +-- peer and after the acting peer's update at every step. +prop_nextPeerAction_processesAllTriggers + :: ArbTxDecisionPolicy + -> TriggerScenario + -> Property +prop_nextPeerAction_processesAllTriggers + (ArbTxDecisionPolicy arbPolicy) (TriggerScenario mode rawPerPeer) = + tabulate "trigger count" [bucket totalTriggers] + . tabulate "peer count" [show nPeers] + . tabulate "iterations" [bucket iterations] + . tabulate "shared txids" [bucket sharedTxidCount] . 
tabulate "overlap mode" [show mode] $ conjoin [ counterexample "loop must terminate within step budget" @@ -1934,31 +2456,20 @@ prop_nextPeerAction_processesAllTriggers , counterexample ("expected acks missing: " ++ show (IntSet.toList missingAcks)) $ property (IntSet.null missingAcks) - , conjoin - [ counterexample ("initial invariant for peer " ++ show p) - $ combinedStateInvariant policy StrongInvariant p ps0 sharedState0 - | (p, ps0) <- Map.toList peerStates0 - ] + , counterexample "initial combined invariant violated" + (combinedStateInvariant policy StrongInvariant peerStates0 sharedState0) , conjoin [ counterexample ("invariant after step " ++ show n ++ " (peer " ++ show p ++ ")") inv | (n, p, inv) <- stateInvariants ] - , conjoin - [ checkQuiescence p ps - | (p, (Nothing, ps)) <- Map.toList finalSchedule - ] ] where perPeer = normaliseScenario rawPerPeer totalTriggers = sum (map length (Map.elems perPeer)) nPeers = Map.size perPeer - -- Number of txids that appear in two or more peers' trigger lists. - -- A non-zero value confirms the multi-peer apparatus is actually - -- exercising cross-peer overlap rather than running independent - -- single-peer scenarios in parallel. txidPeerCounts :: Map.Map TxId Int txidPeerCounts = Map.fromListWith (+) [ (triggerTxid t, 1 :: Int) @@ -1975,13 +2486,8 @@ prop_nextPeerAction_processesAllTriggers , maxOutstandingTxBatchesPerPeer = max 1 totalTriggers } - -- Per-peer interleaving plus lease-expiry cycles inflate iteration - -- counts versus the single-peer case; the budget grows linearly with - -- the product so each peer can claim every advertised tx without - -- exhausting the budget. maxIters = 100 + 6 * totalTriggers * max 1 nPeers - -- Global per-txid expected categories. 
allTxids = nub [ triggerTxid t | (_, ts) <- Map.toAscList perPeer, t <- ts ] txidToKey = Map.fromList (zip allTxids [0..]) @@ -1993,9 +2499,6 @@ prop_nextPeerAction_processesAllTriggers , triggerTxid trig == txid ] catFor txid = categoryOf (triggersFor txid) - -- With deferred body delivery, fetched txids run the full - -- Fetch -> Buffer -> Submit cycle, so 'CatFetch' txids contribute - -- to both expectedSubmitted and expectedFetched. expectedSubmitted = IntSet.fromList [ txidKey t | t <- allTxids , let c = catFor t, c == CatSubmit || c == CatFetch ] @@ -2006,1772 +2509,154 @@ prop_nextPeerAction_processesAllTriggers (peerStates0, sharedState0) = buildTriggerState policy perPeer - initialSchedule :: Map.Map PeerAddr (Maybe Time, PeerTxLocalState (Tx TxId)) - initialSchedule = Map.map (\ps -> (Just now, ps)) peerStates0 + -- Schedule entry: (next-wake-time, peerLocal, peerInFlight). + initialSchedule + :: Map.Map PeerAddr (Maybe Time, PeerTxLocalState (Tx TxId), PeerTxInFlight) + initialSchedule = Map.map (\(ps, pif) -> (Just now, ps, pif)) peerStates0 (allSubmitted, allRequested, allAcked, - stateInvariants, terminated, iterations, - finalSchedule, finalSS, finalTime) = + stateInvariants, terminated, iterations) = runLoop sharedState0 initialSchedule Map.empty IntSet.empty IntSet.empty IntSet.empty [] 0 now - missingAcks = expectedAcked `IntSet.difference` allAcked - - -- Pick the active peer (status 'Just t') with the smallest 't'. - -- Returns 'Nothing' if every peer is parked at 'Nothing' (terminated). - pickEarliest schedule = - case sortBy (compare `on` snd) - [ (p, t) | (p, (Just t, _)) <- Map.toList schedule ] of - [] -> Nothing - (p, t) : _ -> - let (_, ps) = schedule Map.! p in - Just (p, t, ps) - - -- After a state-mutating action, drag every other peer's wake to - -- 'time' (or earlier) so they re-evaluate the new shared state. A - -- peer parked at 'Nothing' (previously terminated) is reactivated. 
- reactivateOthers acting time = - Map.mapWithKey $ \p (status, ps) -> - if p == acting - then (status, ps) - else case status of - Just t' | t' <= time -> (Just t', ps) - _ -> (Just time, ps) - - -- Build the (txid, body) pair for a requested key by looking up the - -- txid in shared state and the size in the requesting peer's - -- pre-action 'peerAvailableTxIds'. Deferring delivery means we have - -- to capture sizes before 'applyRequestTxsChoice' moves the keys - -- out of 'peerAvailableTxIds'. - mkBody :: SharedTxState PeerAddr TxId - -> PeerTxLocalState (Tx TxId) - -> TxKey - -> (TxId, Tx TxId) - mkBody ss ps (TxKey k) = - let txid = sharedKeyToTxId ss IntMap.! k - size = peerAvailableTxIds ps IntMap.! k in - (txid, mkTx txid size) - - -- Quiescence: re-poll a peer parked at terminal status against the - -- final shared state. A truly terminated peer should produce - -- 'PeerDoNothing _ Nothing' with no state mutation. Catches - -- non-determinism in 'nextPeerAction', accidental input mutation - -- on the no-op path, and "fake termination" regressions. - checkQuiescence :: PeerAddr -> PeerTxLocalState (Tx TxId) -> Property - checkQuiescence p ps = - let (action, ps', ss') = nextPeerAction finalTime policy p ps finalSS in - case action of - PeerDoNothing _ Nothing -> - conjoin - [ counterexample ("quiescence: peer " ++ show p ++ " local state changed") - $ ps' === ps - , counterexample ("quiescence: peer " ++ show p ++ " mutated shared state") - $ ss' === finalSS - ] - other -> - counterexample - ("quiescence: peer " ++ show p ++ " produced " ++ show other) - (property False) - - -- runLoop's 'pending' field maps each peer to the bodies queued for - -- its next-iteration delivery. Drain happens just before the peer - -- acts; the request-to-delivery gap gives other peers exactly one - -- scheduling step to observe the in-flight state. 
- runLoop ss schedule pending subs reqs acks invs i lastTime - | i >= maxIters = - (subs, reqs, acks, reverse invs, False, i, schedule, ss, lastTime) - | otherwise = - case pickEarliest schedule of - Nothing -> - (subs, reqs, acks, reverse invs, True, i, schedule, ss, lastTime) - Just (p, time, ps) -> - let lastTime' = max lastTime time - - -- Drain p's pending body deliveries before its action. - (psPre, ssPre, pendingPre, drainInvs, stepDrain) = - case Map.lookup p pending of - Nothing -> (ps, ss, pending, [], i) - Just deliveries -> - let (_, _, ps2, ss2) = - handleReceivedTxs (const False) time policy p - deliveries ps ss - stepD = i + 1 - drainInv = conjoin - [ combinedStateInvariant policy - StrongInvariant p ps2 ss2 - , checkNoThunks - ("peerState after drain (peer " - ++ show p ++ ", step " ++ show stepD ++ ")") - ps2 - , checkNoThunks - ("sharedState after drain (peer " - ++ show p ++ ", step " ++ show stepD ++ ")") - ss2 - ] in - ( ps2, ss2, Map.delete p pending - , [(stepD, p, drainInv)], stepD ) - - (action, ps', ss') = - nextPeerAction time policy p psPre ssPre - oldUnacked = peerUnacknowledgedTxIds psPre - newUnacked = peerUnacknowledgedTxIds ps' - numAcked = StrictSeq.length oldUnacked - - StrictSeq.length newUnacked - ackedNow = IntSet.fromList $ map unTxKey - $ toList (StrictSeq.take numAcked oldUnacked) - step = stepDrain + 1 - inv = conjoin - [ combinedStateInvariant policy - StrongInvariant p ps' ss' - , checkNoThunks - ("peerState' (peer " ++ show p - ++ ", step " ++ show step ++ ")") - ps' - , checkNoThunks - ("sharedState' (peer " ++ show p - ++ ", step " ++ show step ++ ")") - ss' - ] in - case action of - PeerDoNothing _ Nothing -> - let schedule' = Map.insert p (Nothing, ps') schedule in - runLoop ss' schedule' pendingPre subs reqs acks - ((step, p, inv) : drainInvs ++ invs) step lastTime' - PeerDoNothing _ (Just delay) -> - let nextWake = addTime (max delay 0.001) time - schedule' = Map.insert p (Just nextWake, ps') schedule in - runLoop 
ss' schedule' pendingPre subs reqs acks - ((step, p, inv) : drainInvs ++ invs) step lastTime' - PeerSubmitTxs ks -> - let (ps'', ss'') = handleSubmittedTxs time policy p ks [] ps' ss' - postInv = conjoin - [ combinedStateInvariant policy - StrongInvariant p ps'' ss'' - , checkNoThunks - ("peerState'' (peer " ++ show p - ++ ", step " ++ show step ++ ")") - ps'' - , checkNoThunks - ("sharedState'' (peer " ++ show p - ++ ", step " ++ show step ++ ")") - ss'' - ] - others' = reactivateOthers p time schedule - schedule' = Map.insert p (Just time, ps'') others' - subs' = IntSet.union subs - (IntSet.fromList (unTxKey <$> ks)) in - runLoop ss'' schedule' pendingPre subs' reqs acks - ( (step, p, postInv) : (step, p, inv) - : drainInvs ++ invs ) step lastTime' - PeerRequestTxs ks -> - -- Queue the bodies for delivery on p's next iteration. - -- Bodies are built from the pre-action peer state so - -- the size lookup hits 'peerAvailableTxIds' before - -- 'applyRequestTxsChoice' moved the keys out. - let bodies = [ mkBody ssPre psPre k | k <- ks ] - pending' = Map.insert p bodies pendingPre - others' = reactivateOthers p time schedule - schedule' = Map.insert p (Just time, ps') others' - reqs' = IntSet.union reqs - (IntSet.fromList (unTxKey <$> ks)) in - runLoop ss' schedule' pending' subs reqs' acks - ((step, p, inv) : drainInvs ++ invs) step lastTime' - PeerRequestTxIds{} -> - let others' = reactivateOthers p time schedule - schedule' = Map.insert p (Just time, ps') others' - acks' = IntSet.union acks ackedNow in - runLoop ss' schedule' pendingPre subs reqs acks' - ((step, p, inv) : drainInvs ++ invs) step lastTime' - --- | Policy used by the 'TriggerScenario' meta-tests below. Mirrors the --- pin in 'prop_nextPeerAction_processesAllTriggers'. -metaPolicy :: TxDecisionPolicy -metaPolicy = defaultTxDecisionPolicy { txInflightMultiplicity = 2 } - --- | Every generated 'TriggerScenario' produces an initial state that --- satisfies 'combinedStateInvariant' for every peer. 
Catches state- --- builder bugs (wrong txids in maps, missing entries, etc.) before they --- show up as confusing failures of --- 'prop_nextPeerAction_processesAllTriggers'. -prop_TriggerScenario_validInitialState :: TriggerScenario -> Property -prop_TriggerScenario_validInitialState (TriggerScenario _ rawPerPeer) = - let perPeer = normaliseScenario rawPerPeer - (states, ss0) = buildTriggerState metaPolicy perPeer in - conjoin - [ counterexample ("invalid initial state for peer " ++ show p) - $ combinedStateInvariant metaPolicy StrongInvariant p ps ss0 - | (p, ps) <- Map.toList states - ] - --- | Every shrink of a generated scenario is itself valid. Without this, --- 'prop_nextPeerAction_processesAllTriggers' could shrink into invalid --- territory and report a bogus counterexample. -prop_TriggerScenario_shrinkPreservesValidity :: TriggerScenario -> Property -prop_TriggerScenario_shrinkPreservesValidity ts = - conjoin - [ prop_TriggerScenario_validInitialState ts' - | ts' <- shrink ts - ] - --- | Shrinks never grow the total trigger count. Catches shrinker --- regressions that would slow down or invalidate the main property's --- shrinking. -prop_TriggerScenario_shrinkSmaller :: TriggerScenario -> Property -prop_TriggerScenario_shrinkSmaller ts@(TriggerScenario _ rawM) = - let n = sum (map length (Map.elems rawM)) in - conjoin - [ counterexample ("shrink grew the trigger count: " ++ show ts') - $ sum (map length (Map.elems rawM')) <= n - | ts'@(TriggerScenario _ rawM') <- shrink ts - ] - --- | A scenario is not in its own shrink list. Catches shrink-into-self --- regressions that would loop QuickCheck. 
-prop_TriggerScenario_shrinkExcludesOriginal :: TriggerScenario -> Property -prop_TriggerScenario_shrinkExcludesOriginal ts = - counterexample "shrink contains the original value" - $ property (ts `notElem` shrink ts) - - --- | Roles for 'prop_nextPeerAction_claimsClaimableTx': 'Good' has no --- score and can claim; 'Bad' has a 'peerClaimDelay' that pushes its --- claim past 'now'; 'Confounder' has no relevant local state and is --- expected to do nothing on any 'nextPeerAction' call. -data PeerRole = Good | Bad | Confounder - deriving (Eq, Show) - --- | Initial-lease form for 'prop_nextPeerAction_claimsClaimableTx': --- 'ClaimableLease' starts with 'TxClaimable claimableAt' (no current --- holder); 'ExpiredLease' starts with 'TxLeased oldOwner claimableAt' --- (held by a 'PeerWaitingTxs'-phase peer whose 'leaseUntil' is in the --- past). Production routes both through 'txClaimReadyAt', so the same --- claim arithmetic applies in either form -- this axis subsumes the --- expired-lease scenario into the same property. -data LeaseStart = ClaimableLease | ExpiredLease - deriving (Eq, Show) - -instance Arbitrary LeaseStart where - arbitrary = elements [ClaimableLease, ExpiredLease] - --- | A scheduling order over the three roles. The 'Arbitrary' instance --- shuffles the three roles uniformly so each of the six permutations --- is reached. -newtype PeerOrder = PeerOrder [PeerRole] - deriving (Eq, Show) - -instance Arbitrary PeerOrder where - arbitrary = PeerOrder <$> shuffle [Good, Bad, Confounder] - --- | Verifies that 'Good' (no score) wins the lease regardless of which --- order the three peers are scheduled. 'Bad' is score-delayed past --- 'now' so its 'nextPeerAction' yields without claiming; 'Confounder' --- has no advertised tx and yields trivially. The lease must end up at --- 'Good' across all six role orderings. 
-prop_nextPeerAction_claimsClaimableTx - :: ArbTxDecisionPolicy - -> Positive Int - -> Positive Int - -> Positive Int - -> TxId - -> Positive Int - -> Positive Int - -> Positive Int - -> PeerOrder - -> LeaseStart - -> Property -prop_nextPeerAction_claimsClaimableTx - (ArbTxDecisionPolicy arbPolicy) - (Positive good0) (Positive bad0) (Positive conf0) - txid0 txSize0 (Positive badScore0) (Positive tDecay0) (PeerOrder order) - leaseStart = - tabulate "order" [show order] - . tabulate "bad score (decayed)" - [bucket (round decayedBadScore :: Int)] - . tabulate "tDecay (s)" [bucket (round tDecaySec :: Int)] - . tabulate "lease start" [show leaseStart] - $ conjoin - [ peerTxLocalStateInvariant policy goodPeerState0 - , peerTxLocalStateInvariant policy badPeerState0 - , peerTxLocalStateInvariant policy confPeerState0 - , counterexample - ("Good must claim: " ++ show (lookupResult Good)) $ - case lookupResult Good of - Just (PeerRequestTxs txKeys, _) -> txKeys === [key] - _ -> property False - , counterexample "Good must record the requested tx" $ - case lookupResult Good of - Just (_, ps') -> peerRequestedTxs ps' === IntSet.singleton k - Nothing -> property False - , counterexample - ("Bad must yield with the score-delay derived wake: " - ++ show (lookupResult Bad)) $ - case lookupResult Bad of - Just (PeerDoNothing _ (Just delay), _) -> - -- Tolerance absorbs sub-picosecond FP drift from - -- production's 'Double' score arithmetic. 1ns is well - -- below any production-relevant timing distinction. 
- let diff = abs (delay - expectedBadDelay) in - counterexample - ("delay = " ++ show delay - ++ ", expected = " ++ show expectedBadDelay - ++ ", diff = " ++ show diff) - (property (diff < 1e-9)) - other -> - counterexample ("got: " ++ show other) (property False) - , counterexample - ("Confounder must do nothing with no scheduled wake: " - ++ show (lookupResult Confounder)) $ - case lookupResult Confounder of - Just (PeerDoNothing _ Nothing, _) -> property True - _ -> property False - , counterexample "Lease must end up at Good" $ - txLease (lookupEntryOrFail key sharedStateFinal) === - TxLeased goodPeer (addTime (interTxSpace policy) now) - , checkNoThunks "sharedStateFinal" sharedStateFinal - , conjoin - [ checkNoThunks - ("peerState' for " ++ show role) - (ps' :: PeerTxLocalState (Tx TxId)) - | (role, _, ps', _) <- results - ] - , conjoin - [ counterexample - ("combined invariant for " ++ show role) - (combinedStateInvariant policy StrongInvariant - (fst (roleSetup role)) ps' ss') - | (role, _, ps', ss') <- results - ] - ] - where - -- Pin 'scoreMax' high enough to fit a pre-decay score of - -- (decayed-target + decayAmount) without clamping; bound - -- 'scoreRate' to a non-zero range so the decay arithmetic in - -- 'currentPeerScore' produces an observable change. - policy = arbPolicy - { scoreMax = max 200 (scoreMax arbPolicy) - , scoreRate = max 0.01 (min 1.0 (scoreRate arbPolicy)) - } - - goodPeer = good0 - badPeer = bad0 + 1000 - confPeer = conf0 + 2000 - txid = abs txid0 + 1 - txSize = mkSize txSize0 - key = TxKey 0 - k = unTxKey key - - -- Score decay parameters: 'peerScoreTs' is 'tDecaySec' seconds - -- before 'now', so 'currentPeerScore' takes its decay arm. The - -- pre-decay 'peerScoreValue' is chosen so the decayed value lands - -- in [21..100] -- below the pinned 'scoreMax = 200' and above the - -- 1ms-delay threshold of 'now - claimableAt = 0.001s'. 
- claimableAt = Time 99.999 - tDecaySec :: Double - tDecaySec = fromIntegral (1 + (tDecay0 - 1) `mod` 10 :: Int) - decayAmount :: Double - decayAmount = tDecaySec * scoreRate policy - -- Decayed score in [21..100] regardless of 'tDecaySec'/'scoreRate'. - decayedBadScore :: Double - decayedBadScore = fromIntegral (21 + (badScore0 - 1) `mod` 80 :: Int) - badInitialScore :: Double - badInitialScore = decayedBadScore + decayAmount - badPeerScore = PeerScore - { peerScoreValue = badInitialScore - , peerScoreTs = addTime (negate (realToFrac tDecaySec)) now - } - -- Bad's wake delay depends on whether Good claimed before Bad ran: - -- * Bad runs while the lease is 'TxClaimable claimableAt': wake at - -- 'claimableAt + peerClaimDelay', delay relative to 'now'. - -- * Bad runs after Good claimed (lease is 'TxLeased Good - -- (now + interTxSpace)'): wake at 'leaseUntil + peerClaimDelay', - -- delay = 'interTxSpace + peerClaimDelay'. - -- The score-delay formula '/ 20000' is replicated independently - -- here (rather than imported) so divergence in the production - -- 'peerClaimDelay' surfaces as a delay mismatch. - badRunsBeforeGood = case (elemIndex Bad order, elemIndex Good order) of - (Just bi, Just gi) -> bi < gi - _ -> False - -- Independent score-delay formula using the *decayed* score: this - -- replicates production's 'peerClaimDelay (currentPeerScore _ _)' - -- in two steps so divergences in either decay direction (S11) or - -- the '/ 20000' divisor (S14) surface as a delay mismatch. - badClaimDelay :: DiffTime - badClaimDelay = realToFrac (decayedBadScore / 20000) - expectedBadDelay :: DiffTime - expectedBadDelay - | badRunsBeforeGood = badClaimDelay - diffTime now claimableAt - | otherwise = badClaimDelay + interTxSpace policy - - -- 'oldOwner' is only present in shared state when 'leaseStart = - -- ExpiredLease'. 
It holds a stale 'TxLeased oldOwner claimableAt' - -- with 'leaseUntil = claimableAt < now', so the lease is expired - -- and any other advertiser whose 'peerClaimDelay' permits can - -- claim. 'nextPeerAction' is never called for 'oldOwner' -- it - -- exists only to bump 'txAdvertiserCount' and supply the - -- expired-lease holder. - oldOwner = good0 + bad0 + conf0 + 3000 - oldOwnerEntries = case leaseStart of - ClaimableLease -> [] - ExpiredLease -> - [ ( oldOwner - , withAdvertisedTxKeys [key] (mkSharedPeerState) - ) - ] - sharedState0 = emptySharedTxState - { sharedPeers = Map.fromList - $ [ (goodPeer, withAdvertisedTxKeys [key] (mkSharedPeerState)) - , (badPeer, withAdvertisedTxKeys [key] (mkSharedPeerState)) - , (confPeer, mkSharedPeerState) - ] - ++ oldOwnerEntries - , sharedTxIdToKey = Map.singleton (getRawTxId txid) key - , sharedKeyToTxId = IntMap.singleton k txid - , sharedNextTxKey = 1 - , sharedTxTable = IntMap.singleton k TxEntry - { txLease = case leaseStart of - ClaimableLease -> TxClaimable claimableAt - ExpiredLease -> TxLeased oldOwner claimableAt - , txAdvertiserCount = case leaseStart of - ClaimableLease -> 2 - ExpiredLease -> 3 - , txAttempts = Map.empty - , currentMaxInflightMultiplicity = txInflightMultiplicity policy - } - } - - goodPeerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.singleton key - , peerAvailableTxIds = IntMap.singleton k txSize - } - badPeerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.singleton key - , peerAvailableTxIds = IntMap.singleton k txSize - , peerScore = badPeerScore - } - -- 'peerRequestedTxIds' pinned at the per-peer cap so Confounder has - -- no headroom for a 'PeerRequestTxIds' action; combined with empty - -- 'peerAvailableTxIds'/'peerUnacknowledgedTxIds'/'peerDownloadedTxs' - -- and no advertised keys, Confounder has genuinely nothing to do - -- and must yield 'PeerDoNothing _ Nothing'. 
- confPeerState0 = emptyPeerTxLocalState - { peerRequestedTxIds = maxNumTxIdsToRequest policy - } - - roleSetup Good = (goodPeer, goodPeerState0) - roleSetup Bad = (badPeer, badPeerState0) - roleSetup Confounder = (confPeer, confPeerState0) - - -- Run nextPeerAction for each peer in the generated order, - -- threading shared state. Records the post-action peer state and - -- shared state per role so each peer's joint - -- '(ps', ss')' can be checked by 'combinedStateInvariant'. - runOne (ss, acc) role = - let (peer, ps0) = roleSetup role - (action, ps', ss') = nextPeerAction now policy peer ps0 ss - in (ss', (role, action, ps', ss') : acc) - - (sharedStateFinal, resultsRev) = foldl' runOne (sharedState0, []) order - results :: [( PeerRole - , PeerAction - , PeerTxLocalState (Tx TxId) - , SharedTxState PeerAddr TxId )] - results = reverse resultsRev - - lookupResult :: PeerRole - -> Maybe (PeerAction, PeerTxLocalState (Tx TxId)) - lookupResult role = listToMaybe - [ (action, ps') | (r, action, ps', _) <- results, r == role ] - --- | A peer's score decays linearly at 'scoreRate' from its last --- timestamped value, clamped to zero. -unit_peerScore_decaysOverTime :: (String -> IO ()) -> Assertion -unit_peerScore_decaysOverTime step = do - step "After 50 seconds at scoreRate 0.1 a score of 10 should drain to 5" - currentPeerScore policy (Time 50) score0 @?= 5 - step "After 200 seconds the score should be fully decayed (clamped to 0)" - currentPeerScore policy (Time 200) score0 @?= 0 - step "Reading at the same instant as the last update returns the unchanged value" - currentPeerScore policy (Time 0) score0 @?= peerScoreValue score0 - where - policy = defaultTxDecisionPolicy - score0 = PeerScore { peerScoreValue = 10, peerScoreTs = Time 0 } - --- | A new rejection drains the existing score at 'scoreRate' per second --- since its last update before adding the rejection count. 
-unit_applyPeerRejections_drainsThenAdds :: (String -> IO ()) -> Assertion -unit_applyPeerRejections_drainsThenAdds step = do - step "Starting score 10 at Time 0; one rejection 50s later: 10 - (50 * 0.1) + 1 = 6" - fst (applyPeerRejections policy (Time 50) 1 peerState0) @?= 6 - where - policy = defaultTxDecisionPolicy - peerState0 = emptyPeerTxLocalState - { peerScore = PeerScore { peerScoreValue = 10 - , peerScoreTs = Time 0 } } - -unit_nextPeerAction_claimsAtScoreDelayThreshold :: (String -> IO ()) -> Assertion -unit_nextPeerAction_claimsAtScoreDelayThreshold step = do - step "Run nextPeerAction for a peer whose score contributes exactly a 1 ms claim delay" - case peerAction of - PeerRequestTxs txKeys -> do - step "Assert the tx becomes claimable once the peerScore / 20 ms threshold has elapsed" - txKeys @?= [key] - peerRequestedTxs peerState' @?= IntSet.singleton k - txLease (lookupEntryOrFail key sharedState') @?= - TxLeased peeraddr (addTime (interTxSpace defaultTxDecisionPolicy) now) - _ -> - assertFailure ("unexpected peer action: " ++ show peerAction) - where - peeraddr = 7 - txid = 1 - txSize = mkSize (Positive 10) - key = TxKey 0 - k = unTxKey key - claimableAt = Time 99.999 - sharedState0 = emptySharedTxState - { sharedPeers = - Map.singleton peeraddr - (withAdvertisedTxKeys [key] (mkSharedPeerState)) - , sharedTxIdToKey = Map.singleton (getRawTxId txid) key - , sharedKeyToTxId = IntMap.singleton k txid - , sharedNextTxKey = 1 - , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxClaimable claimableAt - , txAdvertiserCount = 1 - , txAttempts = Map.empty - , currentMaxInflightMultiplicity = - txInflightMultiplicity defaultTxDecisionPolicy - } - } - peerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.singleton key - , peerAvailableTxIds = IntMap.singleton k txSize - , peerScore = PeerScore 20 now - } - (peerAction, peerState', sharedState') = - nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 - --- 
| A peer whose submission attempt has been cleared (e.g. after a --- mempool rejection) must not prevent another advertiser from claiming --- the same tx. After one peer's attempt is cleared and its lease --- released back to 'TxClaimable', any other peer that still advertises --- the tx should be able to claim it on its next 'nextPeerAction' pass. --- --- Exercises the cross-peer retry invariant in the 'txSelectable' / --- 'nextPeerAction' path: once no peer has an outstanding attempt on a --- tx and its lease is claimable, a still-advertising peer is eligible --- to re-claim. -unit_nextPeerAction_claimsRejectedTxFromOtherAdvertiser - :: (String -> IO ()) -> Assertion -unit_nextPeerAction_claimsRejectedTxFromOtherAdvertiser step = do - step "Run handleSubmittedTxs with peerA rejecting the tx" - let (peerAStateAfter, sharedStateAfterRejection) = - handleSubmittedTxs now defaultTxDecisionPolicy peerA - [] - [TxKey txKeyInt] - peerAState - sharedState0 - entryAfterRejection = - lookupEntryOrFail (TxKey txKeyInt) sharedStateAfterRejection - - step "Assert peerA's downloadedTxs is cleared" - peerDownloadedTxs peerAStateAfter @?= IntMap.empty - - step "Assert the lease is released and peerA leaves the advertiser set" - txLease entryAfterRejection @?= TxClaimable now - txAttempts entryAfterRejection @?= Map.empty - txAdvertiserCount entryAfterRejection @?= 1 - - step "Assert combinedStateInvariant for peerA after rejection" - assertProperty "combinedStateInvariant peerA after rejection" $ - combinedStateInvariant defaultTxDecisionPolicy StrongInvariant - peerA peerAStateAfter sharedStateAfterRejection - - step "Assert NoThunks on post-rejection states" - assertNoThunks "peerAStateAfter" peerAStateAfter - assertNoThunks "sharedStateAfterRejection" sharedStateAfterRejection - - step "Run nextPeerAction for peerB" - let (action, peerBStateAfter, sharedStateFinal) = - nextPeerAction now defaultTxDecisionPolicy peerB peerBState - sharedStateAfterRejection - - step "Assert 
peerB claims the released tx exclusively" - action @?= PeerRequestTxs [TxKey txKeyInt] - peerRequestedTxs peerBStateAfter @?= IntSet.singleton txKeyInt - - step "Assert peerB now holds the lease" - txLease (lookupEntryOrFail (TxKey txKeyInt) sharedStateFinal) @?= - TxLeased peerB (addTime (interTxSpace defaultTxDecisionPolicy) now) - - step "Assert combinedStateInvariant for peerB after claim" - assertProperty "combinedStateInvariant peerB after claim" $ - combinedStateInvariant defaultTxDecisionPolicy StrongInvariant - peerB peerBStateAfter sharedStateFinal - - step "Assert NoThunks on post-claim states" - assertNoThunks "peerBStateAfter" peerBStateAfter - assertNoThunks "sharedStateFinal" sharedStateFinal - where - peerA, peerB :: PeerAddr - peerA = 1 - peerB = 2 - - txid :: TxId - txid = 4 - - txKeyInt :: Int - txKeyInt = 0 - - txSize :: SizeInBytes - txSize = 100 - - txBody :: Tx TxId - txBody = mkTx txid txSize - - sharedState0 :: SharedTxState PeerAddr TxId - sharedState0 = - ensurePeerAdvertisesTxKeys peerA [TxKey txKeyInt] - $ ensurePeerAdvertisesTxKeys peerB [TxKey txKeyInt] - $ emptySharedTxState - { sharedTxIdToKey = Map.singleton (getRawTxId txid) (TxKey txKeyInt) - , sharedKeyToTxId = IntMap.singleton txKeyInt txid - , sharedNextTxKey = txKeyInt + 1 - , sharedTxTable = IntMap.singleton txKeyInt TxEntry - { txLease = TxLeased peerA (addTime 1 now) - , txAdvertiserCount = 2 - , txAttempts = Map.singleton peerA TxBuffered - , currentMaxInflightMultiplicity = - txInflightMultiplicity defaultTxDecisionPolicy - } - } - - peerAState :: PeerTxLocalState (Tx TxId) - peerAState = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.singleton (TxKey txKeyInt) - , peerDownloadedTxs = IntMap.singleton txKeyInt txBody - } - - peerBState :: PeerTxLocalState (Tx TxId) - peerBState = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.singleton (TxKey txKeyInt) - , peerAvailableTxIds = IntMap.singleton txKeyInt txSize - } - --- | Verifies that 
'pickRequestTxsAction' obeys the soft-budget batch --- semantics across a list of available txs: --- --- * The first tx in advertisement order is requested even if its --- size exceeds the per-peer inflight budget (provided the peer --- hasn't already filled the inflight cap, which it hasn't here). --- * Each subsequent tx is included while --- 'selectedSize + txSize <= sizeBudget'. --- * The walk stops at the first tx that does not fit; later (smaller) --- txs are not tried. --- --- Subsumes the single-tx soft-budget claim (one element in the list) --- and pins the multi-tx '<= sizeBudget' boundary that catches the --- 'exceedsBudget' branch logic. -prop_nextPeerAction_picksTxsRespectingBudget - :: ArbTxDecisionPolicy - -> Positive Int - -> NonEmptyList (Positive Int) - -> Positive Int - -> Positive Int - -> Property -prop_nextPeerAction_picksTxsRespectingBudget - (ArbTxDecisionPolicy basePolicy) (Positive peeraddr) - (NonEmpty rawSizes) (Positive budget0) (Positive prefilledCount0) = - tabulate "tx count" [bucket n] - . tabulate "budget mode" [budgetMode] - . tabulate "prefilled count" [bucket prefilledCount] - . tabulate "selected count" [bucket (length expectedSelection)] - $ conjoin - [ peerTxLocalStateInvariant policy peerState0 - , -- 'expectedSelection' empty implies the budget is exhausted - -- (cap consumed or no candidates), so the action must be - -- 'PeerDoNothing'; otherwise it must match exactly. 
- case (expectedSelection, action) of - ([], PeerDoNothing _ _) -> property True - ([], other) -> - counterexample ("expected PeerDoNothing, got: " ++ show other) - (property False) - (_, PeerRequestTxs txKeys) -> txKeys === expectedKeys - (_, other) -> - counterexample - ("expected PeerRequestTxs " ++ show expectedKeys - ++ ", got: " ++ show other) - (property False) - , counterexample "peerRequestedTxsSize tracks total in-flight" $ - peerRequestedTxsSize peerState' === prefilledSize + sumExpected - , conjoin - [ counterexample ("lease for selected key " ++ show k) $ - txLease (lookupEntryOrFail (TxKey k) sharedState') === - TxLeased peeraddr (addTime (interTxSpace policy) now) - | (k, _) <- expectedSelection - ] - , conjoin - [ counterexample ("prefilled lease unchanged for key " ++ show k) $ - txLease (lookupEntryOrFail (TxKey k) sharedState') === - TxLeased peeraddr (addTime 10 now) - | (k, _) <- prefilledTxs - ] - , conjoin - [ counterexample - ("unselected candidate lease unchanged for key " ++ show k) $ - txLease (lookupEntryOrFail (TxKey k) sharedState') === - TxClaimable now - | (k, _) <- candidateTxs - , TxKey k `notElem` expectedKeys - ] - , conjoin - [ counterexample ("txAttempts for selected key " ++ show k) $ - txAttempts (lookupEntryOrFail (TxKey k) sharedState') === - Map.singleton peeraddr TxDownloading - | (k, _) <- expectedSelection - ] - , conjoin - [ counterexample ("txAttempts unchanged for prefilled key " ++ show k) $ - txAttempts (lookupEntryOrFail (TxKey k) sharedState') === - Map.singleton peeraddr TxDownloading - | (k, _) <- prefilledTxs - ] - , conjoin - [ counterexample ("txAttempts unchanged for unselected key " ++ show k) $ - txAttempts (lookupEntryOrFail (TxKey k) sharedState') === - Map.empty - | (k, _) <- candidateTxs - , TxKey k `notElem` expectedKeys - ] - , combinedStateInvariant policy StrongInvariant peeraddr - peerState' sharedState' - , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks 
"sharedState'" sharedState' - ] - where - txSizes :: [SizeInBytes] - txSizes = map mkSize rawSizes - n = length txSizes - - -- All txs in advertisement order, indexed by 'TxKey'. - indexed :: [(Int, SizeInBytes)] - indexed = zip [0..] txSizes - - keys :: [TxKey] - keys = [TxKey k | (k, _) <- indexed] - - txidFor :: Int -> TxId - txidFor i = i + 1 - - -- Number of pre-existing in-flight txs taken from the start of - -- 'indexed'. Range '[0..n]' covers fully-fresh, partial-prefill, - -- and fully-prefilled (no candidates) configurations. - prefilledCount :: Int - prefilledCount = (prefilledCount0 - 1) `mod` (n + 1) - - (prefilledTxs, candidateTxs) = splitAt prefilledCount indexed - - prefilledSize :: SizeInBytes - prefilledSize = sum (map snd prefilledTxs) - - candidateTotal :: Int - candidateTotal = sum (map (fromIntegral . getSizeInBytes . snd) candidateTxs) - - -- Budget split into three regions to keep the multi-tx fitting - -- cases well-represented in the corpus rather than getting drowned - -- by cap-consumed runs: - -- - -- * 25% "low" ('[0..prefilledSize-1]' or just 0 when - -- prefilledSize is 0) -- exercises 'cap consumed'. - -- * 50% "mid" ('[prefilledSize..prefilledSize+candidateTotal]') - -- -- the partial-fitting boundary where S4-shape budget - -- mutations live. - -- * 25% "high" ('[prefilledSize+candidateTotal+1..]') -- "all - -- fits" with budget to spare. - -- - -- 'budget0' is split: bottom two bits select the region, upper - -- bits position within it. - budgetVal :: Int - budgetVal = - let region = (budget0 - 1) `mod` 4 - offset = (budget0 - 1) `div` 4 - pSize = fromIntegral prefilledSize - in case region of - 0 -> offset `mod` max 1 pSize - 3 -> pSize + candidateTotal + 1 - + offset `mod` max 1 candidateTotal - _ -> pSize + offset `mod` (candidateTotal + 1) - budget :: SizeInBytes - budget = fromIntegral budgetVal - - -- Remaining budget after accounting for in-flight prefilled bytes. 
- sizeBudget :: Int - sizeBudget = max 0 (budgetVal - fromIntegral prefilledSize) - - -- Independent expected selection mirroring production's - -- 'exceedsBudget' with non-zero starting 'peerRequestedTxsSize'. - -- Soft-budget allowance for the first candidate fires only when - -- 'prefilledSize < budget' (the cap isn't already consumed). - -- Accumulators carried as 'Int' to avoid 'SizeInBytes' overflow in - -- the wider arithmetic. - expectedSelection :: [(Int, SizeInBytes)] - expectedSelection = go [] (0 :: Int) candidateTxs - where - go acc _ [] = reverse acc - go acc tot ((k, s) : rest) - | exceedsBudget tot (fromIntegral s) = reverse acc - | otherwise = go ((k, s) : acc) (tot + fromIntegral s) rest - - exceedsBudget :: Int -> Int -> Bool - exceedsBudget selectedSize txSize - | selectedSize + txSize <= sizeBudget = False - | selectedSize > 0 = True - | otherwise = fromIntegral prefilledSize >= budgetVal - - expectedKeys :: [TxKey] - expectedKeys = [TxKey k | (k, _) <- expectedSelection] - - sumExpected :: SizeInBytes - sumExpected = sum (map snd expectedSelection) - - budgetMode - | null candidateTxs = "no candidates" - | budgetVal >= fromIntegral prefilledSize + candidateTotal = "all fits" - | fromIntegral prefilledSize >= budgetVal = "cap consumed" - | maybe False (\(_, s) -> sizeBudget < fromIntegral s) - (listToMaybeFst candidateTxs) = "only first (soft)" - | otherwise = "partial" - where - listToMaybeFst :: [a] -> Maybe a - listToMaybeFst [] = Nothing - listToMaybeFst (x : _) = Just x - - -- 'maxOutstandingTxBatchesPerPeer' pinned to at least 2 so the - -- prefilled batch (when present) plus a fresh request batch fit. 
- policy = basePolicy - { txsSizeInflightPerPeer = budget - , maxOutstandingTxBatchesPerPeer = - max 2 (maxOutstandingTxBatchesPerPeer basePolicy) - } - - sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr - (withAdvertisedTxKeys keys (mkSharedPeerState)) - , sharedTxIdToKey = Map.fromList - [ (getRawTxId (txidFor i), TxKey i) | (i, _) <- indexed ] - , sharedKeyToTxId = IntMap.fromList - [ (i, txidFor i) | (i, _) <- indexed ] - , sharedNextTxKey = n - , sharedTxTable = IntMap.fromList - [ (i, mkEntry i) | (i, _) <- indexed ] - } - where - mkEntry i - | i < prefilledCount = TxEntry - { txLease = TxLeased peeraddr (addTime 10 now) - , txAdvertiserCount = 1 - , txAttempts = Map.singleton peeraddr TxDownloading - , currentMaxInflightMultiplicity = txInflightMultiplicity policy - } - | otherwise = TxEntry - { txLease = TxClaimable now - , txAdvertiserCount = 1 - , txAttempts = Map.empty - , currentMaxInflightMultiplicity = txInflightMultiplicity policy - } - - peerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.fromList keys - , peerAvailableTxIds = IntMap.fromList indexed - , peerRequestedTxs = IntSet.fromList - [ k | (k, _) <- prefilledTxs ] - , peerRequestedTxBatches = case prefilledTxs of - [] -> StrictSeq.empty - xs -> StrictSeq.singleton - (mkRequestedTxBatch - [TxKey k | (k, _) <- xs] - prefilledSize) - , peerRequestedTxsSize = prefilledSize - -- 'peerRequestedTxIds' pinned at the cap so the action under test - -- is 'PeerRequestTxs' (or 'PeerDoNothing'), not 'PeerRequestTxIds'. - , peerRequestedTxIds = maxNumTxIdsToRequest policy - } - - (action, peerState', sharedState') = - nextPeerAction now policy peeraddr peerState0 sharedState0 - --- Verifies that nextPeerAction skips available txs blocked by another --- peer's lease and requests a later claimable tx instead. 
-unit_nextPeerAction_skipsBlockedAvailableTxs :: (String -> IO ()) -> Assertion -unit_nextPeerAction_skipsBlockedAvailableTxs step = do - step "Run nextPeerAction with one blocked tx and one later claimable tx" - case peerAction of - PeerRequestTxs [TxKey requested] -> do - step "Assert the later claimable tx is requested and leased" - requested @?= kClaimable - peerRequestedTxs peerState' @?= IntSet.singleton kClaimable - fmap txLease (IntMap.lookup kClaimable (sharedTxTable sharedState')) @?= - Just (TxLeased peeraddr (addTime (interTxSpace policy) testNow)) - other -> - assertFailure ("unexpected action: " ++ show other) - where - testNow = Time 100 - policy = defaultTxDecisionPolicy - peeraddr = 7 :: PeerAddr - otherPeer = 8 :: PeerAddr - blockedKey = TxKey 1 - claimableKey = TxKey 2 - kBlocked = 1 - kClaimable = 2 - peerState = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.fromList [blockedKey, claimableKey] - , peerAvailableTxIds = IntMap.fromList [(kBlocked, 10), (kClaimable, 11)] - } - sharedState :: SharedTxState PeerAddr TxId - sharedState = emptySharedTxState - { sharedPeers = Map.fromList - [ (peeraddr, withAdvertisedTxKeys [blockedKey, claimableKey] - (mkSharedPeerState)) - , (otherPeer, withAdvertisedTxKeys [blockedKey] - (mkSharedPeerState)) - ] - , sharedTxTable = IntMap.fromList - [ (kBlocked, TxEntry - { txLease = TxLeased otherPeer (addTime 10 testNow) - , txAdvertiserCount = 2 - , txAttempts = Map.empty - , currentMaxInflightMultiplicity = txInflightMultiplicity policy - }) - , (kClaimable, TxEntry - { txLease = TxClaimable (Time 0) - , txAdvertiserCount = 1 - , txAttempts = Map.empty - , currentMaxInflightMultiplicity = txInflightMultiplicity policy - }) - ] - } - (peerAction, peerState', sharedState') = - nextPeerAction testNow policy peeraddr peerState sharedState - --- Verifies that nextPeerAction submits buffered owned txs before --- acknowledging their txids. 
-prop_nextPeerAction_ownerSubmitsBuffered - :: ArbTxDecisionPolicy - -> Positive Int - -> TxId - -> Positive Int - -> Property -prop_nextPeerAction_ownerSubmitsBuffered (ArbTxDecisionPolicy policy) (Positive peeraddr) txid0 txSize0 = - case peerAction of - PeerSubmitTxs [txKey] -> - conjoin - [ txKey === key - , peerState' === peerState0 { peerPhase = PeerSubmittingToMempool } - -- Submit selection atomically marks the chosen tx as TxSubmitting - -- so concurrent peer decisions exclude it. - , sharedState' === markSubmittingTxs peeraddr [key] sharedState0 - ] - _ -> counterexample ("unexpected peer action: " ++ show peerAction) False - where - txid = abs txid0 + 1 - txSize = mkSize txSize0 - tx = mkTx txid txSize - key = TxKey 0 - k = unTxKey key - sharedState0 = emptySharedTxState - { sharedPeers = - Map.singleton peeraddr - (withAdvertisedTxKeys [key] (mkSharedPeerState)) - , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxClaimable (Time 0) - , txAdvertiserCount = 1 - , txAttempts = Map.singleton peeraddr TxBuffered - , currentMaxInflightMultiplicity = txInflightMultiplicity policy - } - , sharedTxIdToKey = Map.singleton (getRawTxId txid) key - , sharedKeyToTxId = IntMap.singleton k txid - , sharedNextTxKey = 1 - } - peerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.singleton key - , peerRequestedTxIds = maxNumTxIdsToRequest policy - , peerDownloadedTxs = IntMap.singleton k tx - } - (peerAction, peerState', sharedState') = nextPeerAction now policy peeraddr peerState0 sharedState0 - --- Verifies that a blocked buffered tx does not prevent the peer from --- requesting a different claimable tx body. 
-unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx :: (String -> IO ()) -> Assertion -unit_nextPeerAction_requestsOtherWorkDespiteBlockedBufferedTx step = do - step "Run nextPeerAction with one blocked buffered tx and one claimable tx" - case peerAction of - PeerRequestTxs [TxKey requested] -> do - step "Assert the blocked tx stays buffered while the claimable tx is requested" - requested @?= kClaimable - peerUnacknowledgedTxIds peerState' @?= peerUnacknowledgedTxIds peerState0 - peerRequestedTxs peerState' @?= IntSet.singleton kClaimable - peerDownloadedTxs peerState' @?= peerDownloadedTxs peerState0 - txAttempts (lookupEntryOrFail blockedKey sharedState') @?= txAttempts blockedEntry - txLease (lookupEntryOrFail claimableKey sharedState') @?= - TxLeased peeraddr (addTime (interTxSpace defaultTxDecisionPolicy) now) - other -> - assertFailure ("unexpected action: " ++ show other) - where - peeraddr = 7 - submittingPeer = 8 - blockedTxid = 1 - claimableTxid = 2 - blockedSize = mkSize (Positive 10) - claimableSize = mkSize (Positive 11) - blockedKey = TxKey 1 - claimableKey = TxKey 2 - kBlocked = unTxKey blockedKey - kClaimable = unTxKey claimableKey - blockedTx = mkTx blockedTxid blockedSize - blockedEntry = TxEntry - { txLease = TxLeased peeraddr (addTime 10 now) - , txAdvertiserCount = 2 - , txAttempts = Map.fromList - [ (peeraddr, TxBuffered) - , (submittingPeer, TxSubmitting) - ] - , currentMaxInflightMultiplicity = - txInflightMultiplicity defaultTxDecisionPolicy - } - sharedState0 = emptySharedTxState - { sharedPeers = Map.fromList - [ (peeraddr, withAdvertisedTxKeys [blockedKey, claimableKey] - (mkSharedPeerState)) - , (submittingPeer, withAdvertisedTxKeys [blockedKey] - (mkSharedPeerState)) - ] - , sharedTxTable = IntMap.fromList - [ (kBlocked, blockedEntry) - , (kClaimable, TxEntry - { txLease = TxClaimable (Time 0) - , txAdvertiserCount = 1 - , txAttempts = Map.empty - , currentMaxInflightMultiplicity = - txInflightMultiplicity 
defaultTxDecisionPolicy - }) - ] - , sharedTxIdToKey = Map.fromList - [ (getRawTxId blockedTxid, blockedKey) - , (getRawTxId claimableTxid, claimableKey) - ] - , sharedKeyToTxId = IntMap.fromList - [ (kBlocked, blockedTxid) - , (kClaimable, claimableTxid) - ] - , sharedNextTxKey = 3 - } - peerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.fromList [blockedKey, claimableKey] - , peerAvailableTxIds = IntMap.fromList - [ (kBlocked, blockedSize) - , (kClaimable, claimableSize) - ] - , peerDownloadedTxs = IntMap.singleton kBlocked blockedTx - } - (peerAction, peerState', sharedState') = - nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 - --- Verifies that txid acknowledgements stop before a blocked buffered tx, so --- earlier safe txids can still be acked and replaced with new txid requests. -unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx :: (String -> IO ()) -> Assertion -unit_nextPeerAction_acksSafePrefixBeforeBlockedBufferedTx step = do - step "Run nextPeerAction with an ackable retained tx followed by a blocked buffered tx" - case peerAction of - PeerRequestTxIds _ txIdsToAcknowledge txIdsToReq -> do - step "Assert only the safe prefix is acknowledged" - txIdsToAcknowledge @?= 1 - assertBool ("expected positive txIdsToReq, got " ++ show txIdsToReq) (txIdsToReq > 0) - peerUnacknowledgedTxIds peerState' @?= StrictSeq.singleton blockedKey - peerRequestedTxIds peerState' @?= txIdsToReq - txAdvertiserCount (lookupEntryOrFail blockedKey sharedState') @?= - txAdvertiserCount blockedEntry - other -> - assertFailure ("unexpected action: " ++ show other) - where - peeraddr = 7 - submittingPeer = 8 - resolvedTxid = 1 - blockedTxid = 2 - blockedSize = mkSize (Positive 10) - resolvedKey = TxKey 1 - blockedKey = TxKey 2 - kResolved = unTxKey resolvedKey - kBlocked = unTxKey blockedKey - blockedTx = mkTx blockedTxid blockedSize - blockedEntry = TxEntry - { txLease = TxLeased peeraddr (addTime 10 now) - , 
txAdvertiserCount = 2 - , txAttempts = Map.fromList - [ (peeraddr, TxBuffered) - , (submittingPeer, TxSubmitting) - ] - , currentMaxInflightMultiplicity = - txInflightMultiplicity defaultTxDecisionPolicy - } - sharedState0 = emptySharedTxState - { sharedPeers = Map.fromList - [ (peeraddr, withAdvertisedTxKeys [blockedKey] - (mkSharedPeerState)) - , (submittingPeer, withAdvertisedTxKeys [blockedKey] - (mkSharedPeerState)) - ] - , sharedTxTable = IntMap.singleton kBlocked blockedEntry - , sharedRetainedTxs = retainedSingleton kResolved (addTime 17 now) - , sharedTxIdToKey = Map.fromList - [ (getRawTxId resolvedTxid, resolvedKey) - , (getRawTxId blockedTxid, blockedKey) - ] - , sharedKeyToTxId = IntMap.fromList - [ (kResolved, resolvedTxid) - , (kBlocked, blockedTxid) - ] - , sharedNextTxKey = 3 - } - peerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.fromList [resolvedKey, blockedKey] - , peerDownloadedTxs = IntMap.singleton kBlocked blockedTx - } - (peerAction, peerState', sharedState') = - nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 - --- Verifies that nextPeerAction keeps non-owner txids unacknowledged until --- the tx has resolved out of the active table. 
-prop_nextPeerAction_nonOwnerWaitsUntilResolved - :: ArbTxDecisionPolicy - -> Positive Int - -> Positive Int - -> TxId - -> Property -prop_nextPeerAction_nonOwnerWaitsUntilResolved (ArbTxDecisionPolicy policy) (Positive owner0) (Positive peeraddr0) txid0 = - owner /= peeraddr ==> - conjoin - [ case unresolvedAction of - PeerDoNothing _ _ -> - unresolvedExpectations - PeerRequestTxIds _ txIdsToAcknowledge _ -> - conjoin - [ txIdsToAcknowledge === 0 - , unresolvedExpectations - ] - _ -> counterexample ("unexpected unresolved action: " ++ show unresolvedAction) False - , case resolvedAction of - PeerRequestTxIds _ txIdsToAcknowledge _ -> - conjoin - [ txIdsToAcknowledge === 1 - , peerUnacknowledgedTxIds resolvedPeerState' === StrictSeq.empty - , checkNoThunks "resolvedPeerState'" (resolvedPeerState' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "resolvedSharedState'" resolvedSharedState' - ] - _ -> counterexample ("unexpected resolved action: " ++ show resolvedAction) False - ] - where - owner = owner0 + 1000 - peeraddr = peeraddr0 + 2000 - txid = abs txid0 + 1 - key = TxKey 0 - k = unTxKey key - sharedPeers0 = Map.fromList - [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState)) - , (peeraddr, withAdvertisedTxKeys [key] (mkSharedPeerState)) - ] - unresolvedSharedState = emptySharedTxState - { sharedPeers = sharedPeers0 - , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxClaimable (Time 0) - , txAdvertiserCount = 2 - , txAttempts = Map.singleton owner TxBuffered - , currentMaxInflightMultiplicity = txInflightMultiplicity policy - } - , sharedTxIdToKey = Map.singleton (getRawTxId txid) key - , sharedKeyToTxId = IntMap.singleton k txid - , sharedNextTxKey = 1 - } - resolvedSharedState = unresolvedSharedState - { sharedTxTable = IntMap.empty - , sharedRetainedTxs = retainedSingleton k (addTime 17 now) - } - peerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.singleton key - , peerRequestedTxIds = 0 - } - (unresolvedAction, 
unresolvedPeerState', unresolvedSharedState') = nextPeerAction now policy peeraddr peerState0 unresolvedSharedState - unresolvedExpectations = - conjoin - [ peerUnacknowledgedTxIds unresolvedPeerState' === peerUnacknowledgedTxIds peerState0 - , txAdvertiserCount (lookupEntryOrFail key unresolvedSharedState') === - txAdvertiserCount (lookupEntryOrFail key unresolvedSharedState) - , checkNoThunks "unresolvedPeerState'" (unresolvedPeerState' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "unresolvedSharedState'" unresolvedSharedState' - ] - (resolvedAction, resolvedPeerState', resolvedSharedState') = nextPeerAction now policy peeraddr peerState0 resolvedSharedState - --- Verifies that nextPeerActionPipelined does nothing when it can only --- acknowledge txids and cannot request new ones in the same step. -prop_nextPeerActionPipelined_requiresAckAndReq - :: ArbTxDecisionPolicy - -> Positive Int - -> TxId - -> Positive Int - -> Property -prop_nextPeerActionPipelined_requiresAckAndReq (ArbTxDecisionPolicy policy) (Positive peeraddr) txid0 _txSize0 = - case peerAction of - PeerDoNothing _ _ -> - conjoin - [ peerUnacknowledgedTxIds peerState' === peerUnacknowledgedTxIds peerState0 - , sharedState' === sharedState0 - , checkNoThunks "peerState'" (peerState' :: PeerTxLocalState (Tx TxId)) - , checkNoThunks "sharedState'" sharedState' - ] - _ -> counterexample ("unexpected pipelined action: " ++ show peerAction) False - where - txid = abs txid0 + 1 - key = TxKey 0 - k = unTxKey key - peerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.singleton key - , peerRequestedTxIds = maxNumTxIdsToRequest policy - } - sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState) - , sharedRetainedTxs = retainedSingleton k (addTime 17 now) - , sharedTxIdToKey = Map.singleton (getRawTxId txid) key - , sharedKeyToTxId = IntMap.singleton k txid - , sharedNextTxKey = 1 - } - (peerAction, peerState', sharedState') = 
nextPeerActionPipelined now policy peeraddr peerState0 sharedState0 - --- Verifies that nextPeerActionPipelined requests txids once it can both --- acknowledge old txids and ask for more. -prop_nextPeerActionPipelined_requestsTxIds - :: ArbTxDecisionPolicy - -> Positive Int - -> TxId - -> Positive Int - -> Property -prop_nextPeerActionPipelined_requestsTxIds (ArbTxDecisionPolicy policy) (Positive peeraddr) txid0 _txSize0 = - case peerAction of - PeerRequestTxIds _ txIdsToAcknowledge txIdsToReq -> - conjoin - [ txIdsToAcknowledge === 1 - , counterexample ("expected positive txIdsToReq, got " ++ show txIdsToReq) (txIdsToReq > 0) - , peerUnacknowledgedTxIds peerState' === StrictSeq.empty - , peerRequestedTxIds peerState' === txIdsToReq - , sharedRetainedTxs sharedState' === sharedRetainedTxs sharedState0 - , sharedTxTable sharedState' === sharedTxTable sharedState0 - , sharedTxIdToKey sharedState' === sharedTxIdToKey sharedState0 - , sharedKeyToTxId sharedState' === sharedKeyToTxId sharedState0 - , sharedGeneration sharedState' === sharedGeneration sharedState0 - ] - _ -> counterexample ("unexpected pipelined action: " ++ show peerAction) False - where - txid = abs txid0 + 1 - key = TxKey 0 - k = unTxKey key - peerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.singleton key - , peerRequestedTxIds = 0 - } - sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState) - , sharedRetainedTxs = retainedSingleton k (addTime 17 now) - , sharedTxIdToKey = Map.singleton (getRawTxId txid) key - , sharedKeyToTxId = IntMap.singleton k txid - , sharedNextTxKey = 1 - } - (peerAction, peerState', sharedState') = nextPeerActionPipelined now policy peeraddr peerState0 sharedState0 - -unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply - :: (String -> IO ()) - -> Assertion -unit_nextPeerActionPipelined_keepsOneUnackedWithOutstandingBodyReply step = do - step "Run nextPeerActionPipelined with three ackable txids 
and one outstanding body batch" - case peerAction of - PeerRequestTxIds _ txIdsToAcknowledge txIdsToReq -> do - step "Assert pipelined txid requests keep one txid unacked while a body reply is still in flight" - txIdsToAcknowledge @?= 2 - assertBool ("expected positive txIdsToReq, got " ++ show txIdsToReq) (txIdsToReq > 0) - peerUnacknowledgedTxIds peerState' @?= StrictSeq.singleton keyC - peerRequestedTxIds peerState' @?= txIdsToReq - peerRequestedTxBatches peerState' @?= peerRequestedTxBatches peerState0 - sharedState' @?= sharedState0 - _ -> - assertFailure ("unexpected pipelined action: " ++ show peerAction) - where - peeraddr :: PeerAddr - peeraddr = 7 - txidA, txidB, txidC :: TxId - txidA = 1 - txidB = 2 - txidC = 3 - keyA = TxKey 0 - keyB = TxKey 1 - keyC = TxKey 2 - requestedBatch = mkRequestedTxBatch [keyA] 11 - peerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.fromList [keyA, keyB, keyC] - , peerRequestedTxs = IntSet.singleton (unTxKey keyA) - , peerRequestedTxBatches = StrictSeq.singleton requestedBatch - , peerRequestedTxsSize = requestedTxBatchSize requestedBatch - } - sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState) - , sharedRetainedTxs = - retainedFromList - [ (unTxKey keyA, addTime 17 now) - , (unTxKey keyB, addTime 17 now) - , (unTxKey keyC, addTime 17 now) - ] - , sharedTxIdToKey = Map.fromList [(getRawTxId txidA, keyA), (getRawTxId txidB, keyB), (getRawTxId txidC, keyC)] - , sharedKeyToTxId = - IntMap.fromList - [ (unTxKey keyA, txidA) - , (unTxKey keyB, txidB) - , (unTxKey keyC, txidC) - ] - , sharedNextTxKey = 3 - } - (peerAction, peerState', sharedState') = - nextPeerActionPipelined now defaultTxDecisionPolicy peeraddr peerState0 sharedState0 - --- Verifies that nextPeerActionPipelined opens a second outstanding body --- batch when another downloadable tx is available. 
-prop_nextPeerActionPipelined_secondBodyBatch - :: ArbTxDecisionPolicy - -> Positive Int - -> TxId - -> TxId - -> Positive Int - -> Positive Int - -> Property -prop_nextPeerActionPipelined_secondBodyBatch (ArbTxDecisionPolicy basePolicy) (Positive peeraddr) txidA0 txidB0 txSizeA0 txSizeB0 = - txidA /= txidB ==> - peerTxLocalStateInvariant policy peerState0 .&&. - case peerAction of - PeerRequestTxs [txKey] -> - conjoin - [ txKey === keyB - , peerRequestedTxs peerState' === IntSet.fromList [kA, kB] - , StrictSeq.length (peerRequestedTxBatches peerState') === 2 - , peerRequestedTxsSize peerState' === txSizeA + txSizeB - , fmap txLease (IntMap.lookup kB (sharedTxTable sharedState')) === - Just (TxLeased peeraddr (addTime (interTxSpace policy) now)) - , fmap (Map.lookup peeraddr . txAttempts) - (IntMap.lookup kB (sharedTxTable sharedState')) === - Just (Just TxDownloading) - ] - _ -> counterexample ("unexpected pipelined action: " ++ show peerAction) False - where - txidA = abs txidA0 + 1 - txidB = abs txidB0 + 2 - txSizeA = mkSize txSizeA0 - txSizeB = mkSize txSizeB0 - keyA = TxKey 0 - keyB = TxKey 1 - kA = unTxKey keyA - kB = unTxKey keyB - peerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.fromList [keyA, keyB] - , peerAvailableTxIds = IntMap.fromList [(kA, txSizeA), (kB, txSizeB)] - , peerRequestedTxs = IntSet.singleton kA - , peerRequestedTxBatches = StrictSeq.singleton (mkRequestedTxBatch [keyA] txSizeA) - , peerRequestedTxsSize = txSizeA - } - sharedState0 = emptySharedTxState - { sharedPeers = - Map.singleton peeraddr - (withAdvertisedTxKeys [keyA, keyB] (mkSharedPeerState)) - , sharedTxTable = IntMap.fromList - [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading) policy) - , (kB, TxEntry - { txLease = TxClaimable (Time 0) - , txAdvertiserCount = 1 - , txAttempts = Map.empty - , currentMaxInflightMultiplicity = txInflightMultiplicity policy - }) - ] - , sharedTxIdToKey = Map.fromList [(getRawTxId txidA, keyA), (getRawTxId txidB, keyB)] 
- , sharedKeyToTxId = IntMap.fromList [(kA, txidA), (kB, txidB)] - , sharedNextTxKey = 2 - } - policy = basePolicy - { maxOutstandingTxBatchesPerPeer = max 2 (maxOutstandingTxBatchesPerPeer basePolicy) - , txsSizeInflightPerPeer = max (txSizeA + txSizeB) - (txsSizeInflightPerPeer basePolicy) - } - (peerAction, peerState', sharedState') = nextPeerActionPipelined now policy peeraddr peerState0 sharedState0 - --- Verifies that nextPeerActionPipelined does not open a third outstanding --- body batch once the per-peer batch limit is reached. -prop_nextPeerActionPipelined_noThirdBodyBatch - :: ArbTxDecisionPolicy - -> Positive Int - -> TxId - -> TxId - -> TxId - -> Positive Int - -> Positive Int - -> Positive Int - -> Property -prop_nextPeerActionPipelined_noThirdBodyBatch (ArbTxDecisionPolicy basePolicy) (Positive peeraddr) txidA0 txidB0 txidC0 txSizeA0 txSizeB0 txSizeC0 = - distinctTxIds ==> - peerTxLocalStateInvariant policy peerState0 .&&. - case peerAction of - PeerDoNothing _ _ -> - conjoin - [ peerRequestedTxs peerState' === peerRequestedTxs peerState0 - , peerRequestedTxBatches peerState' === peerRequestedTxBatches peerState0 - , peerRequestedTxsSize peerState' === peerRequestedTxsSize peerState0 - , sharedState' === sharedState0 - ] - _ -> counterexample ("unexpected pipelined action: " ++ show peerAction) False - where - txidA = abs txidA0 + 1 - txidB = abs txidB0 + 2 - txidC = abs txidC0 + 3 - distinctTxIds = length (nub [txidA, txidB, txidC]) == 3 - txSizeA = mkSize txSizeA0 - txSizeB = mkSize txSizeB0 - txSizeC = mkSize txSizeC0 - keyA = TxKey 0 - keyB = TxKey 1 - keyC = TxKey 2 - kA = unTxKey keyA - kB = unTxKey keyB - kC = unTxKey keyC - peerState0 = emptyPeerTxLocalState - { peerUnacknowledgedTxIds = StrictSeq.fromList [keyA, keyB, keyC] - , peerAvailableTxIds = IntMap.fromList [(kA, txSizeA), (kB, txSizeB), (kC, txSizeC)] - , peerRequestedTxs = IntSet.fromList [kA, kB] - , peerRequestedTxBatches = StrictSeq.fromList - [ mkRequestedTxBatch [keyA] txSizeA 
- , mkRequestedTxBatch [keyB] txSizeB - ] - , peerRequestedTxsSize = txSizeA + txSizeB - } - sharedState0 = emptySharedTxState - { sharedPeers = - Map.singleton peeraddr - (withAdvertisedTxKeys [keyA, keyB, keyC] (mkSharedPeerState)) - , sharedTxTable = IntMap.fromList - [ (kA, mkTxEntry peeraddr txSizeA (Just TxDownloading) policy) - , (kB, mkTxEntry peeraddr txSizeB (Just TxDownloading) policy) - , (kC, TxEntry - { txLease = TxClaimable (Time 0) - , txAdvertiserCount = 1 - , txAttempts = Map.empty - , currentMaxInflightMultiplicity = txInflightMultiplicity policy - }) - ] - , sharedTxIdToKey = Map.fromList [(getRawTxId txidA, keyA), (getRawTxId txidB, keyB), (getRawTxId txidC, keyC)] - , sharedKeyToTxId = IntMap.fromList [(kA, txidA), (kB, txidB), (kC, txidC)] - , sharedNextTxKey = 3 - } - (peerAction, peerState', sharedState') = nextPeerActionPipelined now policy peeraddr peerState0 sharedState0 - policy = basePolicy { maxOutstandingTxBatchesPerPeer = 2 } - --- Verifies that nextPeerAction prunes expired retained txs and removes their --- tx-key mappings while the peer is idle. 
-prop_nextPeerAction_prunesExpiredRetained - :: ArbTxDecisionPolicy - -> Positive Int - -> TxId - -> Positive Int - -> Property -prop_nextPeerAction_prunesExpiredRetained (ArbTxDecisionPolicy policy) (Positive peeraddr) txid0 _txSize0 = - case peerAction of - PeerDoNothing _ Nothing -> - conjoin - [ peerState' === idlePeerState - , IntMap.lookup k (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) - , retainedLookup k (sharedRetainedTxs sharedState') === Nothing - , Map.lookup (getRawTxId txid) (sharedTxIdToKey sharedState') === Nothing - , IntMap.lookup k (sharedKeyToTxId sharedState') === Nothing - , sharedGeneration sharedState' === sharedGeneration sharedState0 + 1 - ] - _ -> counterexample ("unexpected peer action: " ++ show peerAction) False - where - txid = abs txid0 + 1 - key = TxKey 0 - k = unTxKey key - idlePeerState :: PeerTxLocalState (Tx TxId) - idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest policy } - sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState) - , sharedRetainedTxs = retainedSingleton k now - , sharedTxIdToKey = Map.singleton (getRawTxId txid) key - , sharedKeyToTxId = IntMap.singleton k txid - , sharedNextTxKey = max 1 (k + 1) - } - -- The central counters thread sweeps expired retained entries; emulate - -- that by calling the same helper before evaluating the peer decision. - sweptState = sweepSharedState now sharedState0 - (peerAction, peerState', sharedState') = nextPeerAction now policy peeraddr idlePeerState sweptState - --- Verifies that nextPeerAction keeps unexpired retained txs and returns the --- wake delay until their expiry. 
-prop_nextPeerAction_keepsRetained - :: ArbTxDecisionPolicy - -> Positive Int - -> TxId - -> Positive Int - -> Property -prop_nextPeerAction_keepsRetained (ArbTxDecisionPolicy policy) (Positive peeraddr) txid0 _txSize0 = - case peerAction of - PeerDoNothing _ (Just wakeDelay) -> - conjoin - [ peerState' === idlePeerState - , IntMap.lookup k (sharedTxTable sharedState') === (Nothing :: Maybe (TxEntry PeerAddr)) - , retainedLookup k (sharedRetainedTxs sharedState') === Just retainUntil - , Map.lookup (getRawTxId txid) (sharedTxIdToKey sharedState') === Just key - , IntMap.lookup k (sharedKeyToTxId sharedState') === Just txid - , wakeDelay === diffTime retainUntil now - , sharedGeneration sharedState' === sharedGeneration sharedState0 - ] - _ -> counterexample ("unexpected peer action: " ++ show peerAction) False - where - txid = abs txid0 + 1 - retainUntil = addTime 17 now - key = TxKey 0 - k = unTxKey key - idlePeerState :: PeerTxLocalState (Tx TxId) - idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest policy } - sharedState0 = emptySharedTxState - { sharedPeers = Map.singleton peeraddr (mkSharedPeerState) - , sharedRetainedTxs = retainedSingleton k retainUntil - , sharedTxIdToKey = Map.singleton (getRawTxId txid) key - , sharedKeyToTxId = IntMap.singleton k txid - , sharedNextTxKey = 1 - } - (peerAction, peerState', sharedState') = nextPeerAction now policy peeraddr idlePeerState sharedState0 + missingAcks = expectedAcked `IntSet.difference` allAcked --- Verifies that PeerDoNothing waits until the earliest shared expiry, whether --- it comes from a lease or a retained tx. 
-prop_nextPeerAction_earliestWakeDelay - :: ArbTxDecisionPolicy - -> Positive Int - -> Positive Int - -> TxId - -> TxId - -> Positive Int - -> Positive Int - -> Property -prop_nextPeerAction_earliestWakeDelay (ArbTxDecisionPolicy policy) (Positive peeraddr) (Positive owner0) txidA0 txidB0 _txSizeA0 _txSizeB0 = - peeraddr /= owner ==> - conjoin - [ case leaseFirstAction of - PeerDoNothing _ (Just wakeDelay) -> wakeDelay === diffTime leaseUntil now - _ -> counterexample ("unexpected lease-first action: " ++ show leaseFirstAction) False - , case retainFirstAction of - PeerDoNothing _ (Just wakeDelay) -> wakeDelay === diffTime retainUntilSoon now - _ -> counterexample ("unexpected retain-first action: " ++ show retainFirstAction) False - ] - where - owner = owner0 + 1000 - txidA = abs txidA0 + 1 - txidB = abs txidB0 + 2 - leaseUntil = addTime 11 now - retainUntilLater = addTime 29 now - leaseUntilLater = addTime 31 now - retainUntilSoon = addTime 13 now - keyA = TxKey 0 - keyB = TxKey 1 - idlePeerState = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest policy } - sharedPeers0 = Map.fromList - [ (peeraddr, withAdvertisedTxKeys [keyA] (mkSharedPeerState)) - , (owner, withAdvertisedTxKeys [keyA] (mkSharedPeerState)) - ] - leaseFirstState = emptySharedTxState - { sharedPeers = sharedPeers0 - , sharedTxTable = IntMap.singleton (unTxKey keyA) TxEntry - { txLease = TxLeased owner leaseUntil - , txAdvertiserCount = 2 - , txAttempts = Map.empty - , currentMaxInflightMultiplicity = txInflightMultiplicity policy - } - , sharedRetainedTxs = retainedSingleton (unTxKey keyB) retainUntilLater - , sharedTxIdToKey = Map.fromList [(getRawTxId txidA, keyA), (getRawTxId txidB, keyB)] - , sharedKeyToTxId = IntMap.fromList [(unTxKey keyA, txidA), (unTxKey keyB, txidB)] - , sharedNextTxKey = 2 - } - retainFirstState = emptySharedTxState - { sharedPeers = sharedPeers0 - , sharedTxTable = IntMap.singleton (unTxKey keyA) TxEntry - { txLease = TxLeased owner leaseUntilLater - , 
txAdvertiserCount = 2 - , txAttempts = Map.empty - , currentMaxInflightMultiplicity = txInflightMultiplicity policy - } - , sharedRetainedTxs = retainedSingleton (unTxKey keyB) retainUntilSoon - , sharedTxIdToKey = Map.fromList [(getRawTxId txidA, keyA), (getRawTxId txidB, keyB)] - , sharedKeyToTxId = IntMap.fromList [(unTxKey keyA, txidA), (unTxKey keyB, txidB)] - , sharedNextTxKey = 2 - } - (leaseFirstAction, _, _) = nextPeerAction now policy peeraddr idlePeerState leaseFirstState - (retainFirstAction, _, _) = nextPeerAction now policy peeraddr idlePeerState retainFirstState + pickEarliest schedule = + case sortBy (compare `on` snd) + [ (p, t) | (p, (Just t, _, _)) <- Map.toList schedule ] of + [] -> Nothing + (p, t) : _ -> + let (_, ps, pif) = schedule Map.! p in + Just (p, t, ps, pif) --- Verifies that PeerDoNothing reports the current generation of the acting --- peer. -prop_nextPeerAction_returnsPeerGeneration - :: ArbTxDecisionPolicy - -> Positive Int - -> Property -prop_nextPeerAction_returnsPeerGeneration (ArbTxDecisionPolicy policy) (Positive peeraddr) = - case peerAction of - PeerDoNothing generation Nothing -> generation === expectedGeneration - _ -> counterexample ("unexpected peer action: " ++ show peerAction) False - where - expectedGeneration = 7 - sharedState0 :: SharedTxState PeerAddr TxId - sharedState0 = emptySharedTxState - { sharedPeers = Map.fromList - [ ( peeraddr - , (mkSharedPeerState) - { sharedPeerGeneration = expectedGeneration - } - ) - , ( peeraddr + 1000 - , (mkSharedPeerState) - { sharedPeerGeneration = 11 - } - ) - ] - } - peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = maxNumTxIdsToRequest policy } - (peerAction, _, _) = nextPeerAction now policy peeraddr peerState0 sharedState0 + reactivateOthers acting time = + Map.mapWithKey $ \p (status, ps, pif) -> + if p == acting + then (status, ps, pif) + else case status of + Just t' | t' <= time -> (Just t', ps, pif) + _ -> (Just time, ps, pif) --- Verifies that 
handleSubmittedTxs bumps the generation of every other --- advertiser of the resolved tx, regardless of phase, while leaving the --- submitting peer's own generation unchanged. -prop_handleSubmittedTxs_bumpsAdvertisers - :: ArbTxDecisionPolicy - -> Positive Int - -> Positive Int - -> Positive Int - -> TxId - -> Positive Int - -> Property -prop_handleSubmittedTxs_bumpsAdvertisers (ArbTxDecisionPolicy policy) (Positive owner0) (Positive peerA0) (Positive peerB0) txid0 txSize0 = - owner /= peerA && owner /= peerB && peerA /= peerB ==> - conjoin - [ sharedPeerGeneration (lookupPeerOrFail peerA sharedState') === 1 - , sharedPeerGeneration (lookupPeerOrFail peerB sharedState') === 1 - , sharedPeerGeneration (lookupPeerOrFail owner sharedState') === 0 - ] - where - owner = owner0 + 1000 - peerA = peerA0 + 2000 - peerB = peerB0 + 3000 - txid = abs txid0 + 1 - txSize = mkSize txSize0 - tx = mkTx txid txSize - key = TxKey 0 - k = unTxKey key - sharedState0 = emptySharedTxState - { sharedPeers = Map.fromList - [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState)) - , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState)) - , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState)) - ] - , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxLeased owner (addTime 10 now) - , txAdvertiserCount = 3 - , txAttempts = Map.singleton owner TxBuffered - , currentMaxInflightMultiplicity = txInflightMultiplicity policy - } - , sharedTxIdToKey = Map.singleton (getRawTxId txid) key - , sharedKeyToTxId = IntMap.singleton k txid - , sharedNextTxKey = 1 - } - peerState0 = emptyPeerTxLocalState { peerDownloadedTxs = IntMap.singleton k tx } - (_, sharedState') = handleSubmittedTxs now policy owner [key] [] peerState0 sharedState0 - -unit_advertisingPeersForTxExcept_scansAuthoritativePeerSets :: (String -> IO ()) -> Assertion -unit_advertisingPeersForTxExcept_scansAuthoritativePeerSets step = do - step "Build a shared state whose per-tx advertiser count is stale-low" - let 
advertisingPeers = - advertisingPeersForTxExcept owner key sharedState0 - step "Assert all peers advertising the key are found from the authoritative per-peer key sets" - advertisingPeers @?= Set.fromList [peerA, peerB] - where - owner, peerA, peerB, unrelatedPeer :: PeerAddr - owner = 1 - peerA = 2 - peerB = 3 - unrelatedPeer = 4 - txid :: TxId - txid = 1 - baseState = mkSharedState [txid] - key = lookupKeyOrFail txid baseState - k = unTxKey key - sharedState0 = baseState - { sharedPeers = Map.fromList - [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState)) - , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState)) - , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState)) - , (unrelatedPeer, mkSharedPeerState) - ] - , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxLeased owner (addTime 10 now) - , txAdvertiserCount = 1 - , txAttempts = Map.empty - , currentMaxInflightMultiplicity = - txInflightMultiplicity defaultTxDecisionPolicy - } - } + mkBody :: SharedTxState PeerAddr TxId + -> PeerTxLocalState (Tx TxId) + -> TxKey + -> (TxId, Tx TxId) + mkBody ss ps (TxKey k) = + let txid = sharedKeyToTxId ss IntMap.! k + size = peerAvailableTxIds ps IntMap.! 
k in + (txid, mkTx txid size) -unit_removeAdvertisingPeersForResolvedTx_clearsAllAdvertisingPeers :: (String -> IO ()) -> Assertion -unit_removeAdvertisingPeersForResolvedTx_clearsAllAdvertisingPeers step = do - step "Remove a resolved tx key from all advertising peers" - let (sharedState', advertisers) = - removeAdvertisingPeersForResolvedTx key sharedState0 - step "Assert all advertising peers are returned even when the cached count is stale-low" - advertisers @?= Set.fromList [owner, peerA, peerB] - step "Assert the resolved key is cleared from every advertising peer and unrelated peers are unchanged" - sharedPeerAdvertisedTxKeys (lookupPeerOrFail owner sharedState') @?= IntSet.empty - sharedPeerAdvertisedTxKeys (lookupPeerOrFail peerA sharedState') @?= IntSet.empty - sharedPeerAdvertisedTxKeys (lookupPeerOrFail peerB sharedState') @?= IntSet.empty - sharedPeerAdvertisedTxKeys (lookupPeerOrFail unrelatedPeer sharedState') @?= IntSet.empty - sharedTxTable sharedState' @?= sharedTxTable sharedState0 - where - owner, peerA, peerB, unrelatedPeer :: PeerAddr - owner = 1 - peerA = 2 - peerB = 3 - unrelatedPeer = 4 - txid :: TxId - txid = 1 - baseState = mkSharedState [txid] - key = lookupKeyOrFail txid baseState - k = unTxKey key - sharedState0 = baseState - { sharedPeers = Map.fromList - [ (owner, withAdvertisedTxKeys [key] (mkSharedPeerState)) - , (peerA, withAdvertisedTxKeys [key] (mkSharedPeerState)) - , (peerB, withAdvertisedTxKeys [key] (mkSharedPeerState)) - , (unrelatedPeer, mkSharedPeerState) - ] - , sharedTxTable = IntMap.singleton k TxEntry - { txLease = TxLeased owner (addTime 10 now) - , txAdvertiserCount = 1 - , txAttempts = Map.empty - , currentMaxInflightMultiplicity = - txInflightMultiplicity defaultTxDecisionPolicy - } - } + runLoop ss schedule pending subs reqs acks invs i lastTime + | i >= maxIters = + (subs, reqs, acks, reverse invs, False, i) + | otherwise = + case pickEarliest schedule of + Nothing -> + (subs, reqs, acks, reverse invs, True, i) 
+ Just (p, time, ps, pif) -> + let lastTime' = max lastTime time --- Generate a shared peer state. -genSharedPeerState :: Gen SharedPeerState -genSharedPeerState = do - sharedPeerGeneration <- genSmallWord64 - pure SharedPeerState { - sharedPeerAdvertisedTxKeys = IntSet.empty, - sharedPeerGeneration - } + -- Snapshot of every peer's @(peerLocal, peerInFlight)@ + -- with @p@'s entry overridden to the supplied pair. + peerSnapshot pSnap psSnap pifSnap = + Map.insert pSnap (psSnap, pifSnap) + (Map.map (\(_, ps_, pif_) -> (ps_, pif_)) schedule) + + -- Drain p's pending body deliveries before its action. + (psPre, pifPre, ssPre, pendingPre, drainInvs, stepDrain) = + case Map.lookup p pending of + Nothing -> (ps, pif, ss, pending, [], i) + Just deliveries -> + let (_, _, ps2, pif2, ss2) = + handleReceivedTxs (const False) time policy p + deliveries ps pif ss + stepD = i + 1 + drainInv = conjoin + [ combinedStateInvariant policy + StrongInvariant + (peerSnapshot p ps2 pif2) + ss2 + , checkNoThunks + ("peerState after drain (peer " + ++ show p ++ ", step " ++ show stepD ++ ")") + ps2 + , checkNoThunks + ("sharedState after drain (peer " + ++ show p ++ ", step " ++ show stepD ++ ")") + ss2 + ] in + ( ps2, pif2, ss2, Map.delete p pending + , [(stepD, p, drainInv)], stepD ) + + (action, ps', pif', ss') = + nextPeerAction time policy p psPre pifPre ssPre + oldUnacked = peerUnacknowledgedTxIds psPre + newUnacked = peerUnacknowledgedTxIds ps' + numAcked = StrictSeq.length oldUnacked + - StrictSeq.length newUnacked + ackedNow = IntSet.fromList $ map unTxKey + $ toList (StrictSeq.take numAcked oldUnacked) + step = stepDrain + 1 + inv = conjoin + [ combinedStateInvariant policy StrongInvariant + (peerSnapshot p ps' pif') ss' + , checkNoThunks + ("peerState' (peer " ++ show p + ++ ", step " ++ show step ++ ")") + ps' + , checkNoThunks + ("sharedState' (peer " ++ show p + ++ ", step " ++ show step ++ ")") + ss' + ] in + case action of + PeerDoNothing _ Nothing -> + let schedule' = 
Map.insert p (Nothing, ps', pif') schedule in + runLoop ss' schedule' pendingPre subs reqs acks + ((step, p, inv) : drainInvs ++ invs) step lastTime' + PeerDoNothing _ (Just delay) -> + let nextWake = addTime (max delay 0.001) time + schedule' = Map.insert p (Just nextWake, ps', pif') schedule in + runLoop ss' schedule' pendingPre subs reqs acks + ((step, p, inv) : drainInvs ++ invs) step lastTime' + PeerSubmitTxs ks -> + let (ps'', pif'', ss'') = + handleSubmittedTxs time policy p ks [] ps' pif' ss' + postInv = conjoin + [ combinedStateInvariant policy StrongInvariant + (peerSnapshot p ps'' pif'') ss'' + , checkNoThunks + ("peerState'' (peer " ++ show p + ++ ", step " ++ show step ++ ")") + ps'' + , checkNoThunks + ("sharedState'' (peer " ++ show p + ++ ", step " ++ show step ++ ")") + ss'' + ] + others' = reactivateOthers p time schedule + schedule' = Map.insert p (Just time, ps'', pif'') others' + subs' = IntSet.union subs + (IntSet.fromList (unTxKey <$> ks)) in + runLoop ss'' schedule' pendingPre subs' reqs acks + ( (step, p, postInv) : (step, p, inv) + : drainInvs ++ invs ) step lastTime' + PeerRequestTxs ks -> + let bodies = [ mkBody ssPre psPre k | k <- ks ] + pending' = Map.insert p bodies pendingPre + others' = reactivateOthers p time schedule + schedule' = Map.insert p (Just time, ps', pif') others' + reqs' = IntSet.union reqs + (IntSet.fromList (unTxKey <$> ks)) in + runLoop ss' schedule' pending' subs reqs' acks + ((step, p, inv) : drainInvs ++ invs) step lastTime' + PeerRequestTxIds{} -> + let others' = reactivateOthers p time schedule + schedule' = Map.insert p (Just time, ps', pif') others' + acks' = IntSet.union acks ackedNow in + runLoop ss' schedule' pendingPre subs reqs acks' + ((step, p, inv) : drainInvs ++ invs) step lastTime' -- Generate a self-consistent local peer view of requested, available, and downloaded txs. 
genPeerTxLocalState :: Gen (PeerTxLocalState (Tx TxId)) @@ -3840,28 +2725,21 @@ genPeerTxLocalState = sized $ \n -> do txSize <- genPositiveSize pure (unTxKey key, mkTx (txIdForKey key) txSize) -newtype PeerSeed = PeerSeed { peerSeedGeneration :: Word64 } - -data PeerDerivedUsage = PeerDerivedUsage { - peerHasSubmitting :: !Bool - , peerHasRequestedTxs :: !Bool - } - -- Generate a shared tx state with distinct active and retained entries. +-- +-- Per-peer state is no longer carried in 'SharedTxState'; tests that +-- exercise multi-peer behaviour pair this generator with a separate +-- 'PeerTxLocalState' for the peer under test. genSharedTxState :: Gen (SharedTxState PeerAddr TxId) genSharedTxState = sized $ \n -> do let maxPeers = min 6 (n + 1) maxActiveTxs = min 8 (n + 2) maxRetainedTxs = min 6 (n + 2) - numPeers <- chooseInt (0, maxPeers) + numPeers <- chooseInt (1, max 1 maxPeers) peeraddrs <- genDistinctPositiveInts numPeers - peerSeeds <- Map.fromList <$> mapM genPeerSeedEntry peeraddrs - numActiveTxs <- - if null peeraddrs - then pure 0 - else chooseInt (0, maxActiveTxs) + numActiveTxs <- chooseInt (0, maxActiveTxs) numRetainedTxs <- chooseInt (0, maxRetainedTxs) txids <- genDistinctPositiveInts (numActiveTxs + numRetainedTxs) @@ -3871,92 +2749,59 @@ genSharedTxState = sized $ \n -> do retainedEntries <- mapM genRetainedEntry retainedTxIds sharedGeneration <- genSmallWord64 - pure $ buildSharedTxState peerSeeds activeEntries retainedEntries sharedGeneration + pure $ buildSharedTxState activeEntries retainedEntries sharedGeneration where - genPeerSeedEntry peeraddr = do - peerSeedGeneration <- genSmallWord64 - pure (peeraddr, PeerSeed { peerSeedGeneration }) - genRetainedEntry txid = do retainUntil <- genSharedExpiryTime pure (txid, retainUntil) -- Generate one active tx entry using a mix of leased and claimable shapes. 
-genActiveTxEntry :: [PeerAddr] -> TxId -> Gen (TxId, Set.Set PeerAddr, TxEntry PeerAddr) +genActiveTxEntry :: [PeerAddr] -> TxId -> Gen (TxId, TxEntry PeerAddr) genActiveTxEntry peeraddrs txid = do - (txAdvertisers, txEntry) <- frequency - [ (5, genLeasedTxEntry peeraddrs txid) - , (3, genClaimableTxEntry peeraddrs txid) + txEntry <- frequency + [ (5, genLeasedTxEntry peeraddrs) + , (3, genClaimableTxEntry) ] - pure (txid, txAdvertisers, txEntry) - --- Generate a leased entry where the owner may already be downloading, buffered, or submitting. -genLeasedTxEntry :: [PeerAddr] -> TxId -> Gen (Set.Set PeerAddr, TxEntry PeerAddr) -genLeasedTxEntry peeraddrs _txid = do - advertiserPeers <- genNonEmptySublist peeraddrs - owner <- elements advertiserPeers - txAdvertisers <- genOwnedAdvertisers advertiserPeers owner + pure (txid, txEntry) + +-- Generate a leased entry where the owner may also be in submission. +genLeasedTxEntry :: [PeerAddr] -> Gen (TxEntry PeerAddr) +genLeasedTxEntry peeraddrs = do + owner <- elements peeraddrs txLease <- TxLeased owner <$> genSharedExpiryTime - ownerAttempt <- frequency - [ (2, pure Nothing) - , (2, Just <$> elements [TxDownloading, TxBuffered]) - , (1, pure (Just TxSubmitting)) - ] - pure - ( txAdvertisers - , TxEntry { - txLease, - txAdvertiserCount = Set.size txAdvertisers, - txAttempts = maybe Map.empty (Map.singleton owner) ownerAttempt, - currentMaxInflightMultiplicity = - txInflightMultiplicity defaultTxDecisionPolicy - } - ) + inSub <- frequency [(2, pure False), (1, pure True)] + pure TxEntry { + txLease, + txAttempt = if inSub then 0 else 1, + txInSubmission = inSub, + currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy + } --- Generate a claimable entry advertised by one or more resolved peers. 
-genClaimableTxEntry :: [PeerAddr] -> TxId -> Gen (Set.Set PeerAddr, TxEntry PeerAddr) -genClaimableTxEntry peeraddrs _txid = do - advertiserPeers <- genNonEmptySublist peeraddrs - txAdvertisers <- genResolvedAdvertisers advertiserPeers +-- Generate a claimable entry with no in-flight attempt. +genClaimableTxEntry :: Gen (TxEntry PeerAddr) +genClaimableTxEntry = do claimableAt <- genSharedExpiryTime - pure - ( txAdvertisers - , TxEntry { - txLease = TxClaimable claimableAt, - txAdvertiserCount = Set.size txAdvertisers, - txAttempts = Map.empty, - currentMaxInflightMultiplicity = - txInflightMultiplicity defaultTxDecisionPolicy - } - ) - --- Generate the advertiser set for an entry owned by the chosen peer. -genOwnedAdvertisers - :: [PeerAddr] - -> PeerAddr - -> Gen (Set.Set PeerAddr) -genOwnedAdvertisers advertiserPeers owner = - pure (Set.fromList (owner : advertiserPeers)) - --- Generate the advertiser set for a claimable entry. -genResolvedAdvertisers :: [PeerAddr] -> Gen (Set.Set PeerAddr) -genResolvedAdvertisers advertiserPeers = - pure (Set.fromList advertiserPeers) + pure TxEntry { + txLease = TxClaimable claimableAt, + txAttempt = 0, + txInSubmission = False, + currentMaxInflightMultiplicity = + txInflightMultiplicity defaultTxDecisionPolicy + } -- Rebuild a shared state from tx-centric fixtures while preserving interned keys. 
buildSharedTxState - :: Map.Map PeerAddr PeerSeed - -> [(TxId, Set.Set PeerAddr, TxEntry PeerAddr)] + :: [(TxId, TxEntry PeerAddr)] -> [(TxId, Time)] -> Word64 -> SharedTxState PeerAddr TxId -buildSharedTxState peerSeeds activeEntries retainedEntries sharedGeneration = +buildSharedTxState activeEntries retainedEntries sharedGeneration = baseState { - sharedPeers = deriveSharedPeers baseState peerSeeds activeEntries, sharedTxTable = IntMap.fromList [ (unTxKey (lookupKeyOrFail txid baseState), txEntry) - | (txid, _, txEntry) <- activeEntries + | (txid, txEntry) <- activeEntries ], sharedRetainedTxs = retainedFromList @@ -3967,134 +2812,33 @@ buildSharedTxState peerSeeds activeEntries retainedEntries sharedGeneration = } where baseState = - mkSharedState ([ txid | (txid, _, _) <- activeEntries ] <> fmap fst retainedEntries) - --- Derive peer phases from the generated tx entries. -deriveSharedPeers - :: SharedTxState PeerAddr TxId - -> Map.Map PeerAddr PeerSeed - -> [(TxId, Set.Set PeerAddr, TxEntry PeerAddr)] - -> Map.Map PeerAddr SharedPeerState -deriveSharedPeers baseState peerSeeds activeEntries = - Map.mapWithKey buildPeerState completePeerSeeds - where - completePeerSeeds = - foldl' addMissingPeerSeed peerSeeds (concatMap entryPeers activeEntries) - - peerUsages = - foldl' accumulatePeerUsage Map.empty activeEntries - - peerAdvertisedKeys = - Map.fromListWith IntSet.union - [ (peeraddr, IntSet.singleton (unTxKey (lookupKeyOrFail txid baseState))) - | (txid, txAdvertisers, _) <- activeEntries - , peeraddr <- Set.toList txAdvertisers - ] - - addMissingPeerSeed acc peeraddr = - Map.insertWith (\_ old -> old) peeraddr defaultPeerSeed acc - - buildPeerState peeraddr PeerSeed { peerSeedGeneration } = - SharedPeerState { - sharedPeerAdvertisedTxKeys = - Map.findWithDefault IntSet.empty peeraddr peerAdvertisedKeys, - sharedPeerGeneration = peerSeedGeneration - } - - defaultPeerSeed = - PeerSeed { peerSeedGeneration = 0 } - --- Default derived usage for a peer with no 
active work. -emptyPeerDerivedUsage :: PeerDerivedUsage -emptyPeerDerivedUsage = - PeerDerivedUsage { - peerHasSubmitting = False, - peerHasRequestedTxs = False - } - --- Fold one tx entry's attempts into the derived per-peer usage map. -accumulatePeerUsage - :: Map.Map PeerAddr PeerDerivedUsage - -> (TxId, Set.Set PeerAddr, TxEntry PeerAddr) - -> Map.Map PeerAddr PeerDerivedUsage -accumulatePeerUsage acc (_, _, TxEntry { txAttempts }) = - foldl' step acc (Map.toList txAttempts) - where - step acc' (peeraddr, attempt) = - case attempt of - TxDownloading -> - updatePeerUsage peeraddr False True acc' - TxSubmitting -> - updatePeerUsage peeraddr True False acc' - TxBuffered -> - updatePeerUsage peeraddr False False acc' - --- Merge one peer's submitting and inflight usage into the accumulator. -updatePeerUsage - :: PeerAddr - -> Bool - -> Bool - -> Map.Map PeerAddr PeerDerivedUsage - -> Map.Map PeerAddr PeerDerivedUsage -updatePeerUsage peeraddr submitting hasRequestedTxs acc = - Map.insert peeraddr usage' acc - where - usage = - Map.findWithDefault emptyPeerDerivedUsage peeraddr acc - usage' = - usage { - peerHasSubmitting = peerHasSubmitting usage || submitting, - peerHasRequestedTxs = peerHasRequestedTxs usage || hasRequestedTxs - } + mkSharedState (fmap fst activeEntries <> fmap fst retainedEntries) --- Collect every peer mentioned by a tx entry. -entryPeers :: (TxId, Set.Set PeerAddr, TxEntry PeerAddr) -> [PeerAddr] -entryPeers (_, txAdvertisers, TxEntry { txLease, txAttempts }) = - leaseOwner <> Set.toList txAdvertisers <> Map.keys txAttempts - where - leaseOwner = - case txLease of - TxLeased owner _ -> [owner] - TxClaimable _ -> [] - --- Shrink shared state by dropping active txs, retained txs, or unused peers. +-- Shrink shared state by dropping active or retained txs. 
shrinkSharedTxState :: SharedTxState PeerAddr TxId -> [SharedTxState PeerAddr TxId] shrinkSharedTxState sharedState = nub $ [ emptySharedTxState - , buildSharedTxState peerSeeds [] retainedEntries 0 - , buildSharedTxState peerSeeds activeEntries [] 0 - , buildSharedTxState usedPeerSeeds activeEntries retainedEntries 0 + , buildSharedTxState [] retainedEntries 0 + , buildSharedTxState activeEntries [] 0 ] ++ - [ buildSharedTxState peerSeeds activeEntries' retainedEntries 0 + [ buildSharedTxState activeEntries' retainedEntries 0 | activeEntries' <- smallerActiveEntries ] ++ - [ buildSharedTxState peerSeeds activeEntries retainedEntries' 0 + [ buildSharedTxState activeEntries retainedEntries' 0 | retainedEntries' <- smallerRetainedEntries ] where activeEntries = - [ ( resolveTxKey sharedState (TxKey k) - , advertisersForKey k - , txEntry - ) + [ (resolveTxKey sharedState (TxKey k), txEntry) | (k, txEntry) <- IntMap.toList (sharedTxTable sharedState) ] retainedEntries = [ (resolveTxKey sharedState (TxKey k), retainUntil) | (k, retainUntil) <- retainedToList (sharedRetainedTxs sharedState) ] - peerSeeds = - Map.map - (\SharedPeerState { sharedPeerGeneration } -> - PeerSeed { peerSeedGeneration = sharedPeerGeneration }) - (sharedPeers sharedState) - usedPeers = - foldl' (\peers activeEntry -> peers <> entryPeers activeEntry) [] activeEntries - usedPeerSeeds = - Map.filterWithKey (\peeraddr _ -> peeraddr `elem` usedPeers) peerSeeds smallerActiveEntries = take 6 [ activeEntries' @@ -4107,12 +2851,6 @@ shrinkSharedTxState sharedState = | retainedEntries' <- shrinkList (const []) retainedEntries , length retainedEntries' < length retainedEntries ] - advertisersForKey k = - Map.keysSet $ - Map.filter - (\SharedPeerState { sharedPeerAdvertisedTxKeys } -> - IntSet.member k sharedPeerAdvertisedTxKeys) - (sharedPeers sharedState) -- Partition requested keys into a small number of contiguous request batches. 
genRequestedTxBatches @@ -4153,37 +2891,12 @@ splitByLengths (n : ns) xs = let (prefix, suffix) = splitAt n xs in prefix : splitByLengths ns suffix --- Pick a sublist, falling back to a singleton when the input is non-empty. -genNonEmptySublist :: [a] -> Gen [a] -genNonEmptySublist [] = - pure [] -genNonEmptySublist xs = do - ys <- sublistOf xs - case ys of - [] -> (: []) <$> elements xs - _ -> pure ys - -- Generate distinct positive ints from a bounded shuffled range. genDistinctPositiveInts :: Int -> Gen [Int] genDistinctPositiveInts count | count <= 0 = pure [] | otherwise = take count <$> shuffle [1 .. max count (count * 4 + 5)] --- Pick a peer address biased toward existing peers in the shared state, so --- the generator frequently exercises the "peeraddr already known" code --- paths. Falls back to a fresh small-range address when the shared state --- has no peers. -genPeerAddrBiased :: SharedTxState PeerAddr TxId -> Gen PeerAddr -genPeerAddrBiased sharedState = - case Map.keys (sharedPeers sharedState) of - [] -> genFresh - peers -> frequency - [ (3, elements peers) - , (1, genFresh) - ] - where - genFresh = chooseInt (1, 64) - -- Generate expiry times near the shared test reference time. genSharedExpiryTime :: Gen Time genSharedExpiryTime = @@ -4237,64 +2950,6 @@ mkTx txid txSize = Tx , getTxParent = Nothing } --- Construct a peer fixture with zeroed generation. 
-mkSharedPeerState :: SharedPeerState -mkSharedPeerState = - SharedPeerState { - sharedPeerAdvertisedTxKeys = IntSet.empty, - sharedPeerGeneration = 0 - } - -withAdvertisedTxKeys :: [TxKey] -> SharedPeerState -> SharedPeerState -withAdvertisedTxKeys txKeys sharedPeerState = - sharedPeerState { - sharedPeerAdvertisedTxKeys = IntSet.fromList (map unTxKey txKeys) - } - -ensurePeerAdvertisesTxKeys - :: PeerAddr - -> [TxKey] - -> SharedTxState PeerAddr TxId - -> SharedTxState PeerAddr TxId -ensurePeerAdvertisesTxKeys peeraddr txKeys st@SharedTxState { sharedPeers } = - st { - sharedPeers = - Map.alter updatePeer peeraddr sharedPeers - } - where - advertisedKeys = IntSet.fromList (map unTxKey txKeys) - - updatePeer Nothing = - Just (withAdvertisedTxKeys txKeys (mkSharedPeerState)) - updatePeer (Just sharedPeerState) = - Just - (sharedPeerState { - sharedPeerAdvertisedTxKeys = - sharedPeerAdvertisedTxKeys sharedPeerState `IntSet.union` advertisedKeys - }) - --- Shift every TxKey referenced by a peer-local state by a constant offset so --- the state can be composed with a foreign SharedTxState without key --- collisions. -shiftPeerTxLocalStateKeys :: Int -> PeerTxLocalState tx -> PeerTxLocalState tx -shiftPeerTxLocalStateKeys offset peerState = peerState { - peerUnacknowledgedTxIds = - fmap shiftTxKey (peerUnacknowledgedTxIds peerState), - peerAvailableTxIds = - IntMap.mapKeysMonotonic (+ offset) (peerAvailableTxIds peerState), - peerRequestedTxs = - IntSet.map (+ offset) (peerRequestedTxs peerState), - peerRequestedTxBatches = - fmap shiftBatch (peerRequestedTxBatches peerState), - peerDownloadedTxs = - IntMap.mapKeysMonotonic (+ offset) (peerDownloadedTxs peerState) - } - where - shiftTxKey (TxKey k) = TxKey (k + offset) - shiftBatch batch = batch { - requestedTxBatchSet = IntSet.map (+ offset) (requestedTxBatchSet batch) - } - -- Intern a list of txids into an otherwise empty shared state. 
mkSharedState :: [TxId] -> SharedTxState PeerAddr TxId mkSharedState txids = snd (internTxIds txids emptySharedTxState) @@ -4306,15 +2961,6 @@ mkRequestedTxBatch keys requestedTxBatchSize = RequestedTxBatch , requestedTxBatchSize } --- Construct a leased tx entry owned by one peer. -mkTxEntry :: PeerAddr -> SizeInBytes -> Maybe TxAttemptState -> TxDecisionPolicy -> TxEntry PeerAddr -mkTxEntry peeraddr _txSize mAttempt policy = TxEntry - { txLease = TxLeased peeraddr (addTime 10 now) - , txAdvertiserCount = 1 - , txAttempts = maybe Map.empty (Map.singleton peeraddr) mAttempt - , currentMaxInflightMultiplicity = txInflightMultiplicity policy - } - -- Look up an interned key and fail fast in test setup code. lookupKeyOrFail :: TxId -> SharedTxState PeerAddr TxId -> TxKey lookupKeyOrFail txid st = @@ -4329,23 +2975,6 @@ lookupEntryOrFail (TxKey k) st = Just txEntry -> txEntry Nothing -> error "TxLogic.lookupEntryOrFail: missing tx entry" --- Look up a shared peer and fail fast in test setup code. -lookupPeerOrFail :: PeerAddr -> SharedTxState PeerAddr TxId -> SharedPeerState -lookupPeerOrFail peeraddr st = - case Map.lookup peeraddr (sharedPeers st) of - Just sharedPeerState -> sharedPeerState - Nothing -> error "TxLogic.lookupPeerOrFail: missing peer" - --- Shift proposed txids forward until the batch is disjoint from the shared intern table. -freshBatchAgainstSharedState :: SharedTxState PeerAddr TxId -> [(TxId, SizeInBytes)] -> [(TxId, SizeInBytes)] -freshBatchAgainstSharedState sharedState = reverse . snd . foldl' step (reserved, []) - where - reserved = Set.fromList (Map.keys (sharedTxIdToKey sharedState)) - - step (used, acc) (txid, txSize) = - let freshTxId = firstFreshTxId used txid in - (Set.insert (getRawTxId freshTxId) used, (freshTxId, txSize) : acc) - -- Intern the given txids into the shared state and seed each into -- sharedRetainedTxs. 
seedRetainedTxids @@ -4367,110 +2996,6 @@ seedRetainedTxids policy entries st0 = | (txid, _) <- entries ] --- Intern the given txids and add an active sharedTxTable entry for each, --- advertised by the given peer. -seedActiveTxidsForOtherPeer - :: PeerAddr - -> [(TxId, SizeInBytes)] - -> SharedTxState PeerAddr TxId - -> SharedTxState PeerAddr TxId -seedActiveTxidsForOtherPeer otherPeer entries st0 = - stInterned { - sharedTxTable = - foldl' (\tbl k -> IntMap.insert k activeEntry tbl) - (sharedTxTable stInterned) - activeKeys, - sharedPeers = - Map.adjust augmentAdvertised otherPeer (sharedPeers stInterned) - } - where - activeEntry = TxEntry { - txLease = TxClaimable now, - txAdvertiserCount = 1, - txAttempts = Map.empty, - currentMaxInflightMultiplicity = - txInflightMultiplicity defaultTxDecisionPolicy - } - (_, stInterned) = internTxIds (fmap fst entries) st0 - activeKeys = [ unTxKey (lookupKeyOrFail txid stInterned) - | (txid, _) <- entries - ] - augmentAdvertised sps = sps { - sharedPeerAdvertisedTxKeys = - IntSet.union (sharedPeerAdvertisedTxKeys sps) - (IntSet.fromList activeKeys) - } - --- Intern each requested txid and add an active sharedTxTable entry leased to --- @peeraddr@ with a TxDownloading attempt. Entries whose Bool tag is True --- (co-advertised) are also advertised by @otherPeer@, giving them --- @txAdvertiserCount = 2@ so the omitted-and-released path leaves them --- alive (instead of being reaped by dropDeadActiveKeys). Used by the --- handleReceivedTxs property to build a coherent pre-state for one --- outstanding request batch. 
-seedRequestedActiveTxids - :: PeerAddr - -> Maybe PeerAddr - -> [((TxId, SizeInBytes), Bool)] - -> SharedTxState PeerAddr TxId - -> SharedTxState PeerAddr TxId -seedRequestedActiveTxids peeraddr otherPeerOpt tagged st0 = - stFinal - where - entries = fmap fst tagged - (_, stInterned) = internTxIds (fmap fst entries) st0 - leaseUntil = addTime (bufferedTxsMinLifetime defaultTxDecisionPolicy) now - - perKey :: [(Int, Bool)] - perKey = - [ ( unTxKey (lookupKeyOrFail txid stInterned) - , coAdv && isJust otherPeerOpt - ) - | ((txid, _), coAdv) <- tagged - ] - - mkEntry coAdv = TxEntry { - txLease = TxLeased peeraddr leaseUntil, - txAdvertiserCount = if coAdv then 2 else 1, - txAttempts = Map.singleton peeraddr TxDownloading, - currentMaxInflightMultiplicity = - txInflightMultiplicity defaultTxDecisionPolicy - } - - stWithTable = stInterned { - sharedTxTable = - foldl' (\tbl (k, coAdv) -> IntMap.insert k (mkEntry coAdv) tbl) - (sharedTxTable stInterned) - perKey - } - - peerAdvertisedAll = IntSet.fromList (map fst perKey) - otherAdvertisedAll = IntSet.fromList [ k | (k, True) <- perKey ] - - augmentWith addKeys sps = sps { - sharedPeerAdvertisedTxKeys = - IntSet.union (sharedPeerAdvertisedTxKeys sps) addKeys - } - - stFinal = stWithTable { - sharedPeers = - let withPeer = - Map.adjust (augmentWith peerAdvertisedAll) peeraddr - (sharedPeers stWithTable) - in case otherPeerOpt of - Just op | not (IntSet.null otherAdvertisedAll) -> - Map.adjust (augmentWith otherAdvertisedAll) op withPeer - _ -> withPeer - } - --- Find the first txid not present in the reserved set. 
-firstFreshTxId :: Set.Set RawTxId -> TxId -> TxId -firstFreshTxId used = go - where - go txid - | Set.member (getRawTxId txid) used = go (txid + 1) - | otherwise = txid - mkReceiveDuplicateFixture :: Int -> Int -> ReceiveDuplicateFixture mkReceiveDuplicateFixture existingAdvertisers txidCount = ReceiveDuplicateFixture @@ -4535,8 +3060,7 @@ mkFanoutFixture peerCount txidCount = runReceiveDuplicateLoop :: Int -> ReceiveDuplicateFixture -> IO () runReceiveDuplicateLoop iterations ReceiveDuplicateFixture - { rdfPeerAddr - , rdfRequestedTxIds + { rdfRequestedTxIds , rdfTxidsAndSizes , rdfPeerState , rdfSharedState @@ -4550,10 +3074,10 @@ runReceiveDuplicateLoop iterations ReceiveDuplicateFixture (const False) now defaultTxDecisionPolicy - rdfPeerAddr rdfRequestedTxIds rdfTxidsAndSizes rdfPeerState + emptyPeerTxInFlight rdfSharedState _ <- evaluate (rnf result) go (n - 1) @@ -4569,7 +3093,8 @@ runPeerActionLoop iterations PeerActionFixture go 0 = pure () go n = do let result = - nextPeerAction now defaultTxDecisionPolicy pafPeerAddr pafPeerState pafSharedState + nextPeerAction now defaultTxDecisionPolicy pafPeerAddr + pafPeerState emptyPeerTxInFlight pafSharedState _ <- evaluate (rnf result) go (n - 1) @@ -4596,33 +3121,34 @@ runFanoutLoop iterations FanoutFixture in (reverse peerStatesRev, reverse ackResultsRev, sharedStateAfterAck) receiveOne - :: ([(PeerAddr, PeerTxLocalState (Tx TxId))], SharedTxState PeerAddr TxId) + :: ([(PeerAddr, PeerTxLocalState (Tx TxId), PeerTxInFlight)], SharedTxState PeerAddr TxId) -> PeerAddr - -> ([(PeerAddr, PeerTxLocalState (Tx TxId))], SharedTxState PeerAddr TxId) + -> ([(PeerAddr, PeerTxLocalState (Tx TxId), PeerTxInFlight)], SharedTxState PeerAddr TxId) receiveOne (!peerStatesAcc, !sharedStateAcc) peeraddr = let peerState0 = emptyPeerTxLocalState { peerRequestedTxIds = ffRequestedTxIds } - !(peerState', sharedStateAcc') = + !(peerState', peerInFlight', sharedStateAcc') = handleReceivedTxIds (const False) now defaultTxDecisionPolicy - 
peeraddr ffRequestedTxIds ffTxidsAndSizes peerState0 + emptyPeerTxInFlight sharedStateAcc - in ((peeraddr, peerState') : peerStatesAcc, sharedStateAcc') + in ((peeraddr, peerState', peerInFlight') : peerStatesAcc, sharedStateAcc') acknowledgeOne :: ([(PeerAddr, PeerAction, PeerTxLocalState (Tx TxId))], SharedTxState PeerAddr TxId) - -> (PeerAddr, PeerTxLocalState (Tx TxId)) + -> (PeerAddr, PeerTxLocalState (Tx TxId), PeerTxInFlight) -> ([(PeerAddr, PeerAction, PeerTxLocalState (Tx TxId))], SharedTxState PeerAddr TxId) - acknowledgeOne (!ackResultsAcc, !sharedStateAcc) (peeraddr, peerState0) = - let !(peerAction, peerState', sharedStateAcc') = - nextPeerAction now defaultTxDecisionPolicy peeraddr peerState0 sharedStateAcc + acknowledgeOne (!ackResultsAcc, !sharedStateAcc) (peeraddr, peerState0, peerInFlight0) = + let !(peerAction, peerState', _peerInFlight', sharedStateAcc') = + nextPeerAction now defaultTxDecisionPolicy peeraddr + peerState0 peerInFlight0 sharedStateAcc in ( (peeraddr, peerAction, peerState') : ackResultsAcc , sharedStateAcc' ) @@ -4639,7 +3165,7 @@ mkActiveSharedState -> [PeerAddr] -> [(TxId, SizeInBytes)] -> SharedTxState PeerAddr TxId -mkActiveSharedState allPeers ownerPeer resolvedAdvertisers txidsAndSizes = +mkActiveSharedState _allPeers ownerPeer _resolvedAdvertisers txidsAndSizes = sharedState1 { sharedTxTable = IntMap.fromList @@ -4647,32 +3173,15 @@ mkActiveSharedState allPeers ownerPeer resolvedAdvertisers txidsAndSizes = | (txid, _txSize) <- txidsAndSizes , let txKey = lookupKeyOrFail txid sharedState1 ] - , sharedPeers = - Map.fromList - [ (peeraddr, (mkSharedPeerState) { - sharedPeerAdvertisedTxKeys = - Map.findWithDefault IntSet.empty peeraddr peerAdvertisedKeys - }) - | peeraddr <- allPeers - ] } where sharedState0 = emptySharedTxState sharedState1 = snd (internTxIds (fmap fst txidsAndSizes) sharedState0) - advertisers = Set.fromList (ownerPeer : resolvedAdvertisers) - peerAdvertisedKeys = - Map.fromListWith IntSet.union - [ 
(peeraddr, IntSet.singleton (unTxKey txKey)) - | (txid, _txSize) <- txidsAndSizes - , let txKey = lookupKeyOrFail txid sharedState1 - , peeraddr <- Set.toList advertisers - ] - mkEntry _txKey = TxEntry { txLease = TxLeased ownerPeer (addTime 10 now) - , txAdvertiserCount = Set.size advertisers - , txAttempts = Map.empty + , txAttempt = 1 + , txInSubmission = False , currentMaxInflightMultiplicity = txInflightMultiplicity defaultTxDecisionPolicy } @@ -4682,11 +3191,6 @@ mkActiveSharedState allPeers ownerPeer resolvedAdvertisers txidsAndSizes = retainAllActiveTxs :: SharedTxState PeerAddr TxId -> SharedTxState PeerAddr TxId retainAllActiveTxs st@SharedTxState { sharedTxTable, sharedRetainedTxs, sharedGeneration } = st { - sharedPeers = - Map.map - (\sharedPeerState -> - sharedPeerState { sharedPeerAdvertisedTxKeys = IntSet.empty }) - (sharedPeers st), sharedTxTable = IntMap.empty, sharedRetainedTxs = IntMap.foldlWithKey' retainOne sharedRetainedTxs sharedTxTable, sharedGeneration = sharedGeneration + 1 diff --git a/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs b/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs index a9fd34d97f8..5bdbf5e7362 100644 --- a/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs +++ b/ouroboros-network/tracing/Ouroboros/Network/Tracing/TxSubmission.hs @@ -16,17 +16,13 @@ instance (Show txid, Show peeraddr) => LogFormatting (TraceTxLogic peeraddr txid mconcat $ [ "kind" .= String "TraceSharedTxState" , "label" .= label , "sharedGeneration" .= sharedGeneration - , "peerCount" .= Map.size sharedPeers , "activeTxCount" .= IntMap.size sharedTxTable , "retainedTxCount" .= retainedSize sharedRetainedTxs , "internedTxCount" .= Map.size sharedTxIdToKey , "leasedTxCount" .= leasedTxCount , "claimableTxCount" .= claimableTxCount - , "resolvedTxCount" .= resolvedTxCount - , "downloadingAttemptCount" .= downloadingAttemptCount - , "bufferedAttemptCount" .= bufferedAttemptCount - , 
"submittingAttemptCount" .= submittingAttemptCount - , "peerPhases" .= peerPhases + , "totalAttemptCount" .= totalAttemptCount + , "submittingTxCount" .= submittingTxCount ] ++ more where activeEntries = IntMap.elems sharedTxTable @@ -37,35 +33,18 @@ instance (Show txid, Show peeraddr) => LogFormatting (TraceTxLogic peeraddr txid claimableTxCount = length [ () | TxEntry { txLease = TxClaimable _ } <- activeEntries ] - resolvedTxCount = 0 :: Int + totalAttemptCount = + sum [ txAttempt entry | entry <- activeEntries ] - downloadingAttemptCount = - sum [ length [ () | TxDownloading <- Map.elems txAttempts' ] - | TxEntry { txAttempts = txAttempts' } <- activeEntries - ] - - bufferedAttemptCount = - sum [ length [ () | TxBuffered <- Map.elems txAttempts' ] - | TxEntry { txAttempts = txAttempts' } <- activeEntries - ] - - submittingAttemptCount = - sum [ length [ () | TxSubmitting <- Map.elems txAttempts' ] - | TxEntry { txAttempts = txAttempts' } <- activeEntries - ] - - peerPhases :: [(String, Int)] - peerPhases = [] + submittingTxCount = + length [ () | TxEntry { txInSubmission = True } <- activeEntries ] renderTxId txKey = maybe "" show (IntMap.lookup txKey sharedKeyToTxId) more = case dtal of DMaximum -> - [ "sharedPeers" .= [ (show peeraddr, show peerState) - | (peeraddr, peerState) <- Map.toList sharedPeers - ] - , "sharedTxTable" .= [ (renderTxId txKey, show txEntry) + [ "sharedTxTable" .= [ (renderTxId txKey, show txEntry) | (txKey, txEntry) <- IntMap.toList sharedTxTable ] , "sharedRetainedTxs" .= [ (renderTxId txKey, show retainUntil) From 0317ffbb92952318845b29f23ae6a6a6ab7f62a9 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Tue, 5 May 2026 09:14:48 +0200 Subject: [PATCH 63/67] fixup: break out mempoolGetSnapshot Break out mempoolGetSnapshot into its own atomic transaction. 
--- .../TxSubmission/Inbound/V2/Registry.hs | 45 +++++++++++-------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index 99314477064..2bdec7579c7 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -426,9 +426,14 @@ applyReceivedTxIdsImp :: ( MonadSTM m -> PeerTxLocalState tx -> m (PeerTxLocalState tx) applyReceivedTxIdsImp policy mempoolGetSnapshot sharedStateVar peerInFlightVar - countersVar now txIdsToReq txidsAndSizes peerState = + countersVar now txIdsToReq txidsAndSizes peerState = do + -- Snapshot the mempool outside the per-peer STM transaction so mempool + -- writers don't kick the hot path into retries. Stale answers are + -- benign: a false positive delays re-fetch by 'bufferedTxsMinLifetime' + -- via the retained set; a false negative wastes one body fetch that + -- 'handleReceivedTxs' will reclassify as late. 
+ MempoolSnapshot { mempoolHasTx } <- atomically mempoolGetSnapshot atomically $ do - MempoolSnapshot { mempoolHasTx } <- mempoolGetSnapshot sharedState <- readTVar sharedStateVar peerInFlight <- readTVar peerInFlightVar let sharedGeneration0 = sharedGeneration sharedState @@ -456,23 +461,25 @@ applyReceivedTxsImp :: ( MonadSTM m -> PeerTxLocalState tx -> m (Int, PeerTxLocalState tx) applyReceivedTxsImp policy mempoolGetSnapshot sharedStateVar peerInFlightVar - countersVar peeraddr now txs peerState = atomically $ do - MempoolSnapshot { mempoolHasTx } <- mempoolGetSnapshot - sharedState <- readTVar sharedStateVar - peerInFlight <- readTVar peerInFlightVar - let sharedGeneration0 = sharedGeneration sharedState - (omittedCount, lateCount, peerState', peerInFlight', sharedState') = - State.handleReceivedTxs mempoolHasTx now policy peeraddr txs - peerState peerInFlight sharedState - writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' - writePeerInFlightIfChanged peerInFlightVar peerInFlight peerInFlight' - modifyTVar countersVar (<> mempty { - txRepliesReceived = 1, - txsReceived = fromIntegral (length txs), - txsOmitted = fromIntegral omittedCount, - lateBodies = fromIntegral lateCount - }) - return (omittedCount + lateCount, peerState') + countersVar peeraddr now txs peerState = do + -- Mempool snapshot taken in its own STM tx; see 'applyReceivedTxIdsImp'. 
+ MempoolSnapshot { mempoolHasTx } <- atomically mempoolGetSnapshot + atomically $ do + sharedState <- readTVar sharedStateVar + peerInFlight <- readTVar peerInFlightVar + let sharedGeneration0 = sharedGeneration sharedState + (omittedCount, lateCount, peerState', peerInFlight', sharedState') = + State.handleReceivedTxs mempoolHasTx now policy peeraddr txs + peerState peerInFlight sharedState + writeSharedStateIfChanged sharedStateVar sharedGeneration0 sharedState' + writePeerInFlightIfChanged peerInFlightVar peerInFlight peerInFlight' + modifyTVar countersVar (<> mempty { + txRepliesReceived = 1, + txsReceived = fromIntegral (length txs), + txsOmitted = fromIntegral omittedCount, + lateBodies = fromIntegral lateCount + }) + return (omittedCount + lateCount, peerState') -- | Mark txs as submitted to the mempool and update shared state. applySubmittedTxsImp :: ( MonadSTM m From 10b65d47d35d6d421a422d34f9ce8b8e6c431743 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Wed, 6 May 2026 10:48:55 +0200 Subject: [PATCH 64/67] fixup: preserve lookup tables on retained-expiry --- .../TxSubmission/Inbound/V2/Registry.hs | 17 ++-- .../Network/TxSubmission/Inbound/V2/State.hs | 89 +++++++++++++++---- .../Network/TxSubmission/Inbound/V2/Types.hs | 12 ++- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 4 +- 4 files changed, 95 insertions(+), 27 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs index 2bdec7579c7..b4ec49619b6 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Registry.hs @@ -104,8 +104,8 @@ txCountersThreadV2 policy tracer countersVar sharedStateVar registry = do threadDelay sweepInterval now <- getMonotonicTime atomically $ do - liveAdvertised <- snapshotLiveAdvertised registry - modifyTVar sharedStateVar (State.sweepSharedState 
now liveAdvertised) + liveReferences <- snapshotLiveReferences registry + modifyTVar sharedStateVar (State.sweepSharedState now liveReferences) if now >= nextEmitAt then do current <- readTVarIO countersVar @@ -113,19 +113,24 @@ txCountersThreadV2 policy tracer countersVar sharedStateVar registry = do go current (addTime countersInterval now) else go previous nextEmitAt --- | Read every live peer's 'pifAdvertised' and union them. -snapshotLiveAdvertised +-- | Read every live peer's 'pifAdvertised' and 'pifAcksPending' and +-- union them. This is the set of keys still referenced by some peer +-- (advertised for fetch, or held in @peerUnacknowledgedTxIds@ awaiting +-- ack) and is used by the sweep to know which lookup table entries +-- can be safely reclaimed. +snapshotLiveReferences :: MonadSTM m => PeerTxInFlightRegistry m peeraddr -> STM m IntSet -snapshotLiveAdvertised registry = do +snapshotLiveReferences registry = do peers <- readTVar registry foldr step (pure IntSet.empty) (Map.elems peers) where step var k = do pif <- readTVar var acc <- k - pure $! IntSet.union (pifAdvertised pif) acc + pure $! IntSet.union (pifAdvertised pif) + $ IntSet.union (pifAcksPending pif) acc -- | Peer-facing coordination API. 
-- diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 474fc7dcc17..31f98f2780b 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -282,7 +282,8 @@ applyRequestTxIdsChoice ctx flavour acknowledgedTxIds txIdsToAcknowledge txIdsTo } pif = pacPeerInFlight ctx peerInFlight'' = pif { - pifAdvertised = pifAdvertised pif `IntSet.difference` acknowledgedKeys + pifAdvertised = pifAdvertised pif `IntSet.difference` acknowledgedKeys, + pifAcksPending = pifAcksPending pif `IntSet.difference` acknowledgedKeys } -- | Construct a 'PeerDoNothing' action. @@ -759,19 +760,49 @@ dropTxKeys keys st@SharedTxState { sharedTxTable, sharedRetainedTxs, sharedTxIdT Just txid -> Map.delete (getRawTxId txid) txIdToKey Nothing -> txIdToKey +-- | Remove only the txid <-> key lookup entries for the given keys. +-- Used by the sweep for keys that have outlived their 'sharedTxTable' +-- and retained residence but had their lookup entries kept while peers +-- still carried the txid in 'peerUnacknowledgedTxIds'. +dropLookupOnly :: HasRawTxId txid + => IntSet.IntSet + -> SharedTxState peeraddr txid + -> SharedTxState peeraddr txid +dropLookupOnly keys st@SharedTxState { sharedTxIdToKey, sharedKeyToTxId } + | IntSet.null keys = st + | otherwise = + st { + sharedTxIdToKey = IntSet.foldl' deleteTxId sharedTxIdToKey keys, + sharedKeyToTxId = IntMap.withoutKeys sharedKeyToTxId keys + } + where + deleteTxId txIdToKey k = + case IntMap.lookup k sharedKeyToTxId of + Just txid -> Map.delete (getRawTxId txid) txIdToKey + Nothing -> txIdToKey + -- | Shared-state cleanup -- --- Drops two kinds of dead entries in one pass: +-- Drops three kinds of dead entries in one pass: -- --- * Retained entries whose retention deadline has passed. 
+-- * Retained entries whose retention deadline has passed. Only the +-- 'sharedRetainedTxs' membership is removed; the txid lookup tables +-- ('sharedTxIdToKey', 'sharedKeyToTxId') are preserved here because +-- peers may still hold the key in 'peerUnacknowledgedTxIds' until +-- they ack it. -- * Orphaned 'sharedTxTable' entries: entries with a released lease, --- no in-flight attempt, and no live peer still tracking the key in --- its 'pifAdvertised' set. +-- no in-flight attempt, and no live peer still tracking the key. +-- These are safe to fully tear down (lookup tables included): by +-- definition no peer references them. +-- * Stale lookup-table entries: keys present only in 'sharedTxIdToKey' +-- / 'sharedKeyToTxId' with no peer still referencing them. Bounds +-- the lookup tables so they don't grow unboundedly. -- --- The @liveAdvertised@ set is the union of every active peer's --- 'pifAdvertised'. Caller (the sweep thread) snapshots all per-peer --- TVars in the same STM transaction that runs this function so the --- snapshot is coherent with the sharedTxTable read. +-- The @liveReferences@ set is the union of every active peer's +-- 'pifAdvertised' and 'pifAcksPending'. Caller (the sweep thread) +-- snapshots all per-peer TVars in the same STM transaction that runs +-- this function so the snapshot is coherent with the sharedTxTable +-- read. -- -- Bumps 'sharedGeneration' if anything changed so sleeping peer workers wake -- and re-evaluate. @@ -780,23 +811,36 @@ sweepSharedState :: HasRawTxId txid -> IntSet -> SharedTxState peeraddr txid -> SharedTxState peeraddr txid -sweepSharedState now liveAdvertised st - | IntSet.null toDrop = st - | otherwise = (dropTxKeys toDrop st) { - sharedGeneration = sharedGeneration st + 1 - } +sweepSharedState now liveReferences st + | IntSet.null orphans + && IntSet.null expiredRetained + && IntSet.null staleLookups = st + | otherwise = + ( dropLookupOnly staleLookups + . 
dropTxKeys orphans + $ st { sharedRetainedTxs = + retainedDeleteKeys expiredRetained (sharedRetainedTxs st) } + ) { sharedGeneration = sharedGeneration st + 1 } where expiredRetained = retainedExpiredKeys now (sharedRetainedTxs st) orphans = IntMap.keysSet (IntMap.filterWithKey isOrphan (sharedTxTable st)) - toDrop = expiredRetained `IntSet.union` orphans + + retainedAfter = retainedKeysSet (sharedRetainedTxs st) + `IntSet.difference` expiredRetained + referencedKeys = IntMap.keysSet (sharedTxTable st) + `IntSet.union` retainedAfter + `IntSet.union` liveReferences + staleLookups = IntMap.keysSet (sharedKeyToTxId st) + `IntSet.difference` referencedKeys + `IntSet.difference` orphans isOrphan _ TxEntry { txLease = TxLeased {} } = False isOrphan k TxEntry { txAttempt, txInSubmission } | txAttempt > 0 = False | txInSubmission = False - | IntSet.member k liveAdvertised = False + | IntSet.member k liveReferences = False | otherwise = True {-# INLINABLE sweepSharedState #-} @@ -1113,12 +1157,14 @@ handleReceivedTxIds mempoolHasTx now policy requestedTxIds txidsAndSizes (peerState'', peerInFlight'', sharedState'') where peerAdvertisedKeys0 = pifAdvertised peerInFlight + peerAcksPending0 = pifAcksPending peerInFlight -- Fold over received txids: build unacknowledged list, update tables. 
( receivedTxKeysRev , peerAvailableTxIds' , sharedStateHandled , peerAdvertisedKeys' + , peerAcksPending' , sharedChanged ) = foldl' @@ -1127,6 +1173,7 @@ handleReceivedTxIds mempoolHasTx now policy requestedTxIds txidsAndSizes , peerAvailableTxIds peerState , sharedState , peerAdvertisedKeys0 + , peerAcksPending0 , False ) txidsAndSizes @@ -1143,7 +1190,8 @@ handleReceivedTxIds mempoolHasTx now policy requestedTxIds txidsAndSizes } peerInFlight'' = peerInFlight { - pifAdvertised = peerAdvertisedKeys' + pifAdvertised = peerAdvertisedKeys', + pifAcksPending = peerAcksPending' } sharedState'' @@ -1162,6 +1210,7 @@ handleReceivedTxIds mempoolHasTx now policy requestedTxIds txidsAndSizes , IntMap.IntMap SizeInBytes , SharedTxState peeraddr txid , IntSet.IntSet + , IntSet.IntSet , Bool ) -> (txid, SizeInBytes) @@ -1169,6 +1218,7 @@ handleReceivedTxIds mempoolHasTx now policy requestedTxIds txidsAndSizes , IntMap.IntMap SizeInBytes , SharedTxState peeraddr txid , IntSet.IntSet + , IntSet.IntSet , Bool ) step @@ -1176,6 +1226,7 @@ handleReceivedTxIds mempoolHasTx now policy requestedTxIds txidsAndSizes , !availableAcc , !sharedAcc , !peerAdvertisedKeysAcc + , !peerAcksPendingAcc , !sharedChangedAcc ) (txid, txSize) @@ -1184,6 +1235,7 @@ handleReceivedTxIds mempoolHasTx now policy requestedTxIds txidsAndSizes , IntMap.delete k availableAcc , sharedAcc' , IntSet.delete k peerAdvertisedKeysAcc + , IntSet.insert k peerAcksPendingAcc , sharedChangedAcc' ) | mempoolHasTx txid = @@ -1195,6 +1247,7 @@ handleReceivedTxIds mempoolHasTx now policy requestedTxIds txidsAndSizes retainedInsertMax k retainUntil (sharedRetainedTxs sharedAcc') } , IntSet.delete k peerAdvertisedKeysAcc + , IntSet.insert k peerAcksPendingAcc , True ) | otherwise = @@ -1213,6 +1266,7 @@ handleReceivedTxIds mempoolHasTx now policy requestedTxIds txidsAndSizes sharedTxTable = IntMap.insert k txEntry (sharedTxTable sharedAcc') } , IntSet.insert k peerAdvertisedKeysAcc + , IntSet.insert k peerAcksPendingAcc , 
True ) Just _ -> @@ -1220,6 +1274,7 @@ handleReceivedTxIds mempoolHasTx now policy requestedTxIds txidsAndSizes , IntMap.insert k txSize availableAcc , sharedAcc' , IntSet.insert k peerAdvertisedKeysAcc + , IntSet.insert k peerAcksPendingAcc , sharedChangedAcc' ) where diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs index 3004672d8f9..6ec6271b602 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/Types.hs @@ -216,7 +216,14 @@ data PeerTxInFlight = PeerTxInFlight { -- | Keys this peer currently counts toward 'txInSubmission'. -- Set on 'PeerSubmitTxs' / 'markSubmittingTxs', cleared on accept -- or reject. - pifSubmitting :: !IntSet + pifSubmitting :: !IntSet, + + -- | Keys this peer holds in 'peerUnacknowledgedTxIds'. A superset + -- of 'pifAdvertised': also tracks keys the peer learned about via + -- the retained-or-mempool path of 'handleReceivedTxIds' that are + -- not advertised for fetch but still pending an ack. Used by the + -- sweep to know whether the txid lookup tables can be reclaimed. 
+ pifAcksPending :: !IntSet } deriving stock (Eq, Show, Generic) deriving anyclass (NFData, NoThunks) @@ -226,7 +233,8 @@ emptyPeerTxInFlight = PeerTxInFlight { pifAdvertised = IntSet.empty, pifLeased = IntSet.empty, pifAttempting = IntSet.empty, - pifSubmitting = IntSet.empty + pifSubmitting = IntSet.empty, + pifAcksPending = IntSet.empty } -- | Whether a txid request will be sent as a blocking or pipelined wire diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 74c876960a1..4f08e0c7411 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -353,8 +353,8 @@ sharedTxStateInvariant strength SharedTxState { (IntSet.null (IntMap.keysSet sharedTxTable `IntSet.intersection` retainedKeysSet sharedRetainedTxs)) , counterexample "tx-key maps disagree" (property (keysRoundTripForward && keysRoundTripBackward)) - , counterexample "tx-key maps do not track exactly the live tx keys" - (IntMap.keysSet sharedKeyToTxId === liveKeys) + , counterexample "live tx keys missing from tx-key maps" + (liveKeys `IntSet.isSubsetOf` IntMap.keysSet sharedKeyToTxId) , counterexample "sharedNextTxKey does not stay ahead of all live tx keys" (property (all (< sharedNextTxKey) (IntSet.toList liveKeys))) ] From fd1218d8545d6d97aa57e9391feeba6bbf95734b Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Wed, 6 May 2026 10:50:41 +0200 Subject: [PATCH 65/67] fixup: pickBufferedTxsToSubmit handle missing entries --- .../Network/TxSubmission/Inbound/V2/State.hs | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 31f98f2780b..53c9c4e07a1 100644 --- 
a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -310,14 +310,21 @@ pickSubmitAction PeerActionContext { pacPeerState, pacPeerInFlight, pacSharedSta else Just txsToSubmit where - -- Walk the unacknowledged txid queue in peer advertisement order, picking - -- bodies buffered by this peer for immediate submission. Stop at the - -- first tx that is unresolved and not available from this peer: later - -- txs in the peer's stream must not run ahead of earlier ones, otherwise - -- a tx may be offered to the mempool before a transaction it depends on - -- is confirmed. Txs already resolved elsewhere (present in - -- 'sharedRetainedTxs') are skipped over since no further action is - -- needed for them. + -- Walk the unacknowledged txid queue in peer advertisement order, + -- picking bodies buffered by this peer for immediate submission. + -- Classification of each entry: + -- + -- * 'sharedTxTable' has 'Nothing' for the key: the tx was resolved at + -- some point (a peer submitted it to the mempool) and the entry was + -- deleted. Orphan sweep cannot drop an entry while any peer still + -- tracks the key, so 'Nothing' here implies resolution; safe to skip + -- and continue past, regardless of whether the retained marker has + -- since expired or the mempool has since evicted. + -- * 'sharedTxTable' has 'Just' but we don't have the body buffered, or + -- another peer is already submitting: the tx is in flight elsewhere. + -- Stop, otherwise later txs in our stream might run ahead of an + -- unresolved earlier tx they depend on. + -- * 'sharedTxTable' has 'Just' and we have the body buffered: submit. 
pickBufferedTxsToSubmit = go [] (toList (peerUnacknowledgedTxIds pacPeerState)) where go acc [] = reverse acc @@ -327,15 +334,8 @@ pickSubmitAction PeerActionContext { pacPeerState, pacPeerInFlight, pacSharedSta | txBufferedByPeer pacPeerState k , not (txSubmittingByOther pacPeerInFlight k txEntry) -> go (txKey : acc) rest - _ | retainedMember k (sharedRetainedTxs pacSharedState) -> - -- already resolved via another peer - go acc rest - _ | not (IntMap.member k (peerAvailableTxIds pacPeerState)) - , not (IntMap.member k (peerDownloadedTxs pacPeerState)) -> - -- we have already finished with this tx (previously - -- submitted, or never had a body to submit) - go acc rest - _ -> reverse acc + Just _ -> reverse acc + Nothing -> go acc rest -- | Select transactions to request from the peer, if within policy limits. -- From d50cf4f54ebc740aec4aca2b4c5df771c95ead92 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Wed, 6 May 2026 10:52:06 +0200 Subject: [PATCH 66/67] fixup: txIdAckable defers ack while body buffered --- .../lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs index 53c9c4e07a1..b88bc62f789 100644 --- a/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs +++ b/ouroboros-network/lib/Ouroboros/Network/TxSubmission/Inbound/V2/State.hs @@ -714,11 +714,16 @@ txIdAckable :: Eq peeraddr txIdAckable PeerActionContext { pacPeerAddr, pacPeerState, pacPeerInFlight, pacSharedState } (TxKey k) | retainedMember k (sharedRetainedTxs pacSharedState) = True + | IntMap.member k (peerDownloadedTxs pacPeerState) = False + -- We hold the body in our local buffer; we must submit it before + -- acking the txid, otherwise the body would orphan in + -- 'peerDownloadedTxs' if 'pickSubmitAction' is blocked from + -- reaching this entry by an 
earlier in-flight tx. + | not (IntSet.member k (pifAdvertised pacPeerInFlight)) = True -- The peer no longer tracks the txid as advertised. This covers -- mempool/retained txids that 'handleReceivedTxIds' kept out of the -- advertised set, as well as txids the peer has already attempted -- and submitted (or that another peer resolved). - | not (IntSet.member k (pifAdvertised pacPeerInFlight)) = True | otherwise = case IntMap.lookup k (sharedTxTable pacSharedState) of Just txEntry -> From 13be9cb4a58e64a72f8eaf6b258934ea9c1f3144 Mon Sep 17 00:00:00 2001 From: Karl Knutsson Date: Wed, 6 May 2026 11:40:03 +0200 Subject: [PATCH 67/67] fixup: add counter thread Add the conter thread to the test cases. Add meta tests for AppV2 generators/shrinkers --- .../Ouroboros/Network/TxSubmission/AppV2.hs | 293 +++++++++++++----- .../Network/TxSubmission/Impaired.hs | 2 +- .../Ouroboros/Network/TxSubmission/TxLogic.hs | 4 +- 3 files changed, 226 insertions(+), 73 deletions(-) diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs index b3414be5ce9..d91639e4b03 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/AppV2.hs @@ -44,6 +44,7 @@ import Data.Maybe (fromMaybe, isJust) import Data.Monoid (Sum (..)) import Data.Set qualified as Set import Data.Typeable (Typeable) +import Data.Word (Word64) import Ouroboros.Network.Channel import Ouroboros.Network.ControlMessage (ControlMessage (..), ControlMessageSTM) @@ -75,7 +76,16 @@ import Test.Tasty.QuickCheck (testProperty) tests :: TestTree tests = testGroup "AppV2" - [ testProperty "txSubmission" prop_txSubmission + [ testGroup "Generators" + [ testProperty "TxSubmissionState/validGen" prop_TxSubmissionState_validGen + , testProperty "TxSubmissionState/shrinkValid" + $ prop_TxSubmissionState_shrinkValid + , testProperty 
"TxSubmissionState/shrinkSmaller" + $ prop_TxSubmissionState_shrinkSmaller + , testProperty "TxSubmissionState/shrinkNoDups" + $ prop_TxSubmissionState_shrinkNoDups + ] + , testProperty "txSubmission" prop_txSubmission , testProperty "inflight" prop_txSubmission_inflight , testProperty "resilientToImpairment" prop_txSubmission_resilientToImpairment , testProperty "SharedTxState" $ withMaxSize 25 @@ -97,7 +107,7 @@ data TxSubmissionState = ) , peerImpairment :: Map Int Impairment , decisionPolicy :: TxDecisionPolicy - } deriving (Show) + } deriving (Eq, Show) instance Arbitrary TxSubmissionState where arbitrary = do @@ -116,15 +126,18 @@ instance Arbitrary TxSubmissionState where decisionPolicy } shrink TxSubmissionState { peerMap, peerImpairment, decisionPolicy } = - [ TxSubmissionState peerMap' peerImpairment policy - | peerMap' <- shrinkMap1 peerMap - , ArbTxDecisionPolicy policy <- shrink (ArbTxDecisionPolicy decisionPolicy) - ] + [ TxSubmissionState peerMap' peerImpairment decisionPolicy + | peerMap' <- shrinkMap1 peerMap + ] + ++ [ TxSubmissionState peerMap peerImpairment policy + | ArbTxDecisionPolicy policy <- shrink (ArbTxDecisionPolicy decisionPolicy) + ] where - shrinkMap1 :: (Ord k, Arbitrary k, Arbitrary v) => Map k v -> [Map k v] + shrinkMap1 :: (Eq v, Ord k, Arbitrary k, Arbitrary v) => Map k v -> [Map k v] shrinkMap1 m - | Map.size m <= 1 = [m] - | otherwise = [Map.delete k m | k <- Map.keys m] ++ singletonMaps + | Map.size m <= 1 = [] + | otherwise = + List.nub $ [Map.delete k m | k <- Map.keys m] ++ singletonMaps where singletonMaps = [Map.singleton k v | (k, v) <- Map.toList m] @@ -163,6 +176,7 @@ runTxSubmission ) => Tracer m (String, TraceSendRecv (TxSubmission2 txid (Tx txid))) -> Tracer m (TraceTxLogic peeraddr txid (Tx txid)) + -> Tracer m TxSubmissionCounters -> Map peeraddr ( [Tx txid] , ControlMessageSTM m , Maybe DiffTime @@ -172,7 +186,7 @@ runTxSubmission -> TxDecisionPolicy -> m ([Tx txid], [[Tx txid]]) -- ^ inbound and outbound 
mempools -runTxSubmission tracer _tracerTxLogic st0 peerImpairmentMap txDecisionPolicy = do +runTxSubmission tracer _tracerTxLogic countersTracer st0 peerImpairmentMap txDecisionPolicy = do st <- traverse (\(b, c, d, e) -> do mempool <- newMempool b (outChannel, inChannel) <- createConnectedChannels @@ -232,16 +246,20 @@ runTxSubmission tracer _tracerTxLogic st0 peerImpairmentMap txDecisionPolicy = d (txSubmissionServerPeerPipelined server) ) <$> Map.assocs st - withAsyncAll (zip clients servers) $ \as -> do - _ <- waitAllServers as + withAsync (txCountersThreadV2 txDecisionPolicy countersTracer + txCountersVar sharedTxStateVar inFlightRegistry) + \countersAid -> + withAsyncAll (zip clients servers) $ \as -> do + _ <- waitAllServers as + cancel countersAid - inmp <- readMempool inboundMempool - dupTxIds <- Lazy.readTVarIO duplicateTxIdsVar - let outmp = map (\(txs, _, _, _) -> txs) - $ Map.elems st0 - dupTxs = [ txMap Map.! txid | txid <- dupTxIds] + inmp <- readMempool inboundMempool + dupTxIds <- Lazy.readTVarIO duplicateTxIdsVar + let outmp = map (\(txs, _, _, _) -> txs) + $ Map.elems st0 + dupTxs = [ txMap Map.! txid | txid <- dupTxIds] - return (inmp <> dupTxs, outmp) + return (inmp <> dupTxs, outmp) where waitAllServers :: [(Async m x, Async m x)] -> m [Either SomeException x] waitAllServers [] = return [] @@ -303,13 +321,154 @@ txSubmissionSimulation (TxSubmissionState state peerImpairment txDecisionPolicy) ) \_ -> do let tracer :: forall a. (Show a, Typeable a) => Tracer (IOSim s) a tracer = dynamicTracer <> sayTracer -- <> verboseTracer <> debugTracer - runTxSubmission tracer tracer state'' peerImpairment txDecisionPolicy + runTxSubmission tracer tracer tracer state'' peerImpairment txDecisionPolicy filterValidTxs :: [Tx txid] -> [Tx txid] filterValidTxs = filter getTxValid . takeWhile (\Tx{getTxSize, getTxAdvSize} -> getTxSize == getTxAdvSize) +-- | Pretty-print only the say-tracer events of an IOSim trace, prefixed +-- with their simulation time. 
The full 'ppTrace' includes scheduling +-- noise (ThreadDelay, Deschedule, TxCommitted, etc.) that obscures the +-- protocol-relevant events; this helper restricts the output to the +-- events that the test infrastructure forwards through 'sayTracer' +-- (protocol messages, counter snapshots, shared-state changes). +ppSayTrace :: SimTrace a -> String +ppSayTrace tr = + List.intercalate "\n" + $ map (\(Time t, ev) -> show t <> " " <> ev) + $ selectTraceEventsSayWithTime' tr + +-- | Documented invariants of a 'TxSubmissionState' produced by the +-- 'Arbitrary' instance. Used by the generator/shrinker meta-tests. +validTxSubmissionState :: TxSubmissionState -> Bool +validTxSubmissionState (TxSubmissionState peerMap peerImpairment policy) = + not (Map.null peerMap) + && Map.keysSet peerImpairment `Set.isSubsetOf` Map.keysSet peerMap + && all validPeer (Map.elems peerMap) + && validPolicy policy + where + validPeer (txs, _, _) = + let txids = getTxId <$> txs in + not (null txs) + && length txids == Set.size (Set.fromList txids) + +validPolicy :: TxDecisionPolicy -> Bool +validPolicy TxDecisionPolicy + { maxNumTxIdsToRequest, maxUnacknowledgedTxIds + , txsSizeInflightPerPeer, maxOutstandingTxBatchesPerPeer + , txInflightMultiplicity, bufferedTxsMinLifetime + , scoreRate, scoreMax, interTxSpace, inflightTimeout + } = + getNumTxIdsToReq maxNumTxIdsToRequest >= 1 + && getNumTxIdsToReq maxUnacknowledgedTxIds >= 1 + && getSizeInBytes txsSizeInflightPerPeer >= 1 + && maxOutstandingTxBatchesPerPeer >= 1 + && txInflightMultiplicity >= 1 + && bufferedTxsMinLifetime >= 0 + && scoreRate >= 0 + && scoreMax >= 0 + && interTxSpace >= 0 + && inflightTimeout > interTxSpace + +prop_TxSubmissionState_validGen :: TxSubmissionState -> Property +prop_TxSubmissionState_validGen st = + counterexample (show st) + $ validTxSubmissionState st + +prop_TxSubmissionState_shrinkValid :: TxSubmissionState -> Property +prop_TxSubmissionState_shrinkValid st = conjoin + [ counterexample (show s) 
(validTxSubmissionState s) + | s <- shrink st + ] + +-- | Every shrunk state differs from the input. Catches base cases like +-- 'shrinkMap1 = [m]' that re-emit the original. +prop_TxSubmissionState_shrinkSmaller :: TxSubmissionState -> Property +prop_TxSubmissionState_shrinkSmaller st = conjoin + [ counterexample ("shrink emitted self: " ++ show s) (s /= st) + | s <- shrink st + ] + +-- | Shrink output contains no duplicates. Catches 'shrinkMap1' generating +-- the same singleton via different paths and the cross-product producing +-- equivalent candidates. +prop_TxSubmissionState_shrinkNoDups :: TxSubmissionState -> Property +prop_TxSubmissionState_shrinkNoDups st = + let shrunk = shrink st in + counterexample ("duplicates: " ++ show (shrunk List.\\ List.nub shrunk)) + $ length (List.nub shrunk) === length shrunk + + +-- | Invariants over the counter snapshots emitted by 'txCountersThreadV2'. +-- Asserts monotonicity of every field, protocol-level causality bounds, +-- decomposition of total txid sends into blocking and pipelined, and body +-- accounting (received bounded by requested, classified bounded by received). 
+prop_counterInvariants :: SimTrace a -> Property +prop_counterInvariants tr = + let snapshots :: [TxSubmissionCounters] + snapshots = selectTraceEventsDynamic tr in + counterexample ("snapshots: " ++ show (length snapshots)) + $ conjoin + [ counterexample "monotonicity" (checkMonotonic snapshots) + , counterexample "causality" (conjoin (checkCausality <$> snapshots)) + , counterexample "decomposition" (conjoin (checkDecomp <$> snapshots)) + , counterexample "body-accounting" + (conjoin (checkBodyAccounting <$> snapshots)) + ] + where + counterFields :: [(String, TxSubmissionCounters -> Word64)] + counterFields = + [ ("txIdMessagesSent", txIdMessagesSent) + , ("txIdsRequested", txIdsRequested) + , ("txIdRepliesReceived", txIdRepliesReceived) + , ("txIdsReceived", txIdsReceived) + , ("txMessagesSent", txMessagesSent) + , ("txsRequested", txsRequested) + , ("txRepliesReceived", txRepliesReceived) + , ("txsReceived", txsReceived) + , ("txsOmitted", txsOmitted) + , ("lateBodies", lateBodies) + , ("txsAccepted", txsAccepted) + , ("txsRejected", txsRejected) + , ("txIdBlockingReqsSent", txIdBlockingReqsSent) + , ("txIdPipelinedReqsSent", txIdPipelinedReqsSent) + , ("txIdBlockingWaitMs", txIdBlockingWaitMs) + , ("txPipelineWaitMs", txPipelineWaitMs) + , ("txSubmissionWaitMs", txSubmissionWaitMs) + ] + + checkMonotonic xs = conjoin + [ counterexample (name ++ ": " ++ show (f a) ++ " > " ++ show (f b)) + (f a <= f b) + | (name, f) <- counterFields + , (a, b) <- zip xs (drop 1 xs) + ] + + checkCausality s = conjoin + [ counterexample "txIdRepliesReceived > txIdMessagesSent" + (txIdRepliesReceived s <= txIdMessagesSent s) + , counterexample "txRepliesReceived > txMessagesSent" + (txRepliesReceived s <= txMessagesSent s) + , counterexample "txIdsReceived > txIdsRequested" + (txIdsReceived s <= txIdsRequested s) + , counterexample "txsReceived > txsRequested" + (txsReceived s <= txsRequested s) + ] + + checkDecomp s = + counterexample "txIdMessagesSent /= blocking + pipelined" 
+ $ txIdMessagesSent s + === txIdBlockingReqsSent s + txIdPipelinedReqsSent s + + checkBodyAccounting s = + counterexample + ("accepted + rejected + late > received: " + ++ show (txsAccepted s, txsRejected s, lateBodies s, txsReceived s)) + (txsAccepted s + txsRejected s + lateBodies s <= txsReceived s) + + -- | Tests overall tx submission semantics. The properties checked in this -- property test are the same as for tx submission v1. We need this to know we -- didn't regress. @@ -339,61 +498,55 @@ prop_txSubmission st@(TxSubmissionState peers _ _) = $ case traceResult True tr of Left e -> counterexample (show e) - . counterexample (ppTrace tr) + . counterexample (ppSayTrace tr) $ False Right (inmp, outmps) -> - counterexample (ppTrace tr) - $ conjoin (validate inmp `map` outmps) + counterexample (ppSayTrace tr) + $ conjoin (validate inmp `map` outmps) .&&. prop_counterInvariants tr where - checkMempools :: [Tx Int] -> [Tx Int] -> Bool + -- | Asserts that every txid produced is present in the consumer set. + -- On failure the counterexample names the missing txids so the + -- diagnostic points directly at what's wrong. 
+ checkMempools :: [Tx Int] -> [Tx Int] -> Property checkMempools consumer producer = - let producer' = Set.fromList $ getTxId <$> producer - consumer' = Set.fromList $ getTxId <$> consumer - in producer' `Set.isSubsetOf` consumer' + let producer' = Set.fromList (getTxId <$> producer) + consumer' = Set.fromList (getTxId <$> consumer) + missing = producer' `Set.difference` consumer' in + counterexample ("missing from inbound mempool: " ++ show (Set.toList missing)) + $ Set.null missing validate :: [Tx Int] -- the inbound mempool -> [Tx Int] -- one of the outbound mempools -> Property validate inmp outmp = let outUniqueTxIds = nubBy (on (==) getTxId) outmp - outValidTxs = filterValidTxs outmp - in - case ( length outUniqueTxIds == length outmp - , length outValidTxs == length outmp - ) of - x@(True, True) -> - -- If we are presented with a stream of unique txids for valid - -- transactions the inbound transactions should match the outbound - -- transactions exactly. - counterexample (show x) - . counterexample (show inmp) - . counterexample (show outmp) + outValidTxs = filterValidTxs outmp in + counterexample ("inbound mempool: " ++ show (getTxId <$> inmp)) + . counterexample ("outbound mempool: " ++ show (getTxId <$> outmp)) + $ case ( length outUniqueTxIds == length outmp + , length outValidTxs == length outmp + ) of + (True, True) -> + counterexample + "case (unique-txids, all-valid): every valid tx must reach inbound" $ checkMempools inmp outValidTxs - x@(True, False) | Nothing <- List.find (\tx -> getTxAdvSize tx /= getTxSize tx) outmp -> - -- If we are presented with a stream of unique txids then we should have - -- fetched all valid transactions if all txs have valid sizes. - counterexample (show x) - . counterexample (show inmp) - . 
counterexample (show outValidTxs) - + (True, False) | Nothing <- List.find (\tx -> getTxAdvSize tx /= getTxSize tx) outmp -> + counterexample + "case (unique-txids, has-invalid, sizes-match): every valid tx must reach inbound" $ checkMempools inmp outValidTxs - | otherwise -> - -- If there's one tx with an invalid size, we will download only - -- some of them, but we don't guarantee how many we will download. - -- - -- This is ok, the peer is cheating. - property True - - - x@(False, True) -> - -- If we are presented with a stream of valid txids then we should have - -- fetched some version of those transactions. - counterexample (show x) - . counterexample (show inmp) - . counterexample (show outmp) - $ checkMempools inmp - (filterValidTxs outUniqueTxIds) + + (True, False) -> + -- One tx has a wrong advertised size: the peer is cheating, no + -- guarantee on how much we download. + counterexample + "case (unique-txids, has-invalid, has-size-mismatch): peer cheating, no guarantee" + $ property True + + (False, True) -> + counterexample + "case (duplicate-txids, all-valid): some version of every valid txid must reach inbound" + $ checkMempools inmp (filterValidTxs outUniqueTxIds) (False, False) -> -- If we are presented with a stream of valid and invalid Txs with @@ -419,20 +572,20 @@ prop_txSubmission_inflight st@(TxSubmissionState state _ policy) = ) state trace = runSimTrace (txSubmissionSimulation st) - pTrace = List.intercalate "\n" $ map (\(Time t, ev) -> show t <> " " <> ev) $ - selectTraceEventsSayWithTime' trace + pTrace = ppSayTrace trace in case traceResult True trace of - Left err -> counterexample pTrace --(ppTrace trace) + Left err -> counterexample pTrace $ counterexample (show err) $ property False Right (inmp, _) -> let resultRepeatedValidTxs = foldr fn Map.empty inmp in label (if hasInvalidSize then "has wrongly sized tx" else "has no wrongly sized tx") - . counterexample pTrace --(ppTrace trace) + . counterexample pTrace . 
counterexample ("hasInvalidSize: " <> show hasInvalidSize) . counterexample ("Result valid [(txid, repeated)]:\n" <> show resultRepeatedValidTxs) . counterexample ("Testcase max valid [(txid, repeated)]:\n" <> show maxRepeatedValidTxs) + . (\p -> p .&&. prop_counterInvariants trace) . conjoin . Map.elems $ if hasInvalidSize then merge (mapMissing \_txid _left -> error "impossible") (mapMissing \_txid _right -> True) @@ -493,11 +646,12 @@ prop_txSubmission_resilientToImpairment baseSt = case traceResult True tr of Left e -> counterexample (show e) - . counterexample (ppTrace tr) + . counterexample (ppSayTrace tr) $ False Right (inmp, _) -> - counterexample (ppTrace tr) + counterexample (ppSayTrace tr) $ conjoin (validateWellBehaved inmp `map` wbPeerTxs) + .&&. prop_counterInvariants tr where -- Pick a non-empty proper subset of peers to impair. Each impaired -- peer gets some mix of body delay and per-body omission (at least @@ -563,10 +717,9 @@ prop_txSubmission_resilientToImpairment baseSt = prop_sharedTxStateInvariant :: TxSubmissionState -> Property prop_sharedTxStateInvariant initialState@(TxSubmissionState st0 _ _) = let tr = runSimTrace (() <$ txSubmissionSimulation initialState) - pTrace = List.intercalate "\n" $ map (\(Time t, ev) -> show t <> " " <> ev) $ - selectTraceEventsSayWithTime' tr + pTrace = ppSayTrace tr in case traceResult True tr of - Left err -> counterexample pTrace --(ppTrace tr) + Left err -> counterexample pTrace . 
counterexample (show err) $ False Right _ -> diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Impaired.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Impaired.hs index 8facbd7e8d8..93b8c13575d 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Impaired.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/Impaired.hs @@ -115,7 +115,7 @@ data Impairment = Impairment -- ^ per-body Bernoulli drop probability, in [0, 1] , impairSeed :: Int -- ^ seed for the per-peer StdGen used by 'omitBodies' - } deriving Show + } deriving (Eq, Show) -- | The neutral impairment: no delay, no omission. Equivalent to running the -- client unwrapped. diff --git a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs index 4f08e0c7411..e320fe69a01 100644 --- a/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs +++ b/ouroboros-network/tests/lib/Test/Ouroboros/Network/TxSubmission/TxLogic.hs @@ -389,7 +389,7 @@ sharedTxStateInvariant strength SharedTxState { ] newtype ArbTxDecisionPolicy = ArbTxDecisionPolicy TxDecisionPolicy - deriving Show + deriving (Eq, Show) newtype ArbSharedTxState = ArbSharedTxState (SharedTxState PeerAddr TxId) deriving Show @@ -449,7 +449,7 @@ instance Arbitrary ArbTxDecisionPolicy where shrink (ArbTxDecisionPolicy a) | a == defaultTxDecisionPolicy = [] - | otherwise = + | otherwise = nub $ ArbTxDecisionPolicy defaultTxDecisionPolicy : [ ArbTxDecisionPolicy a { maxNumTxIdsToRequest = NumTxIdsToReq x } | (Positive (Small x)) <- shrink (Positive (Small (getNumTxIdsToReq (maxNumTxIdsToRequest a))))