diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index f75f59b2dd..812267446f 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -23,10 +23,6 @@ jobs: steps: - id: set_ghc_versions run: | - # benching the two latest GHCs we support now - # since benchmark are expansive. - # choosing the two latest are easier to maintain and more forward looking - # see discussion https://github.com/haskell/haskell-language-server/pull/4118 # Benchmarking only the two latest GHCs we support now # since benchmark are expensive. # Choosing the two latest is easier to maintain and more forward looking. diff --git a/cabal.project b/cabal.project index d9085d43ca..2d50064b04 100644 --- a/cabal.project +++ b/cabal.project @@ -51,7 +51,6 @@ constraints: allow-newer: cabal-install-parsers:Cabal-syntax, - if impl(ghc >= 9.13) allow-newer: cabal-install-parsers:base, diff --git a/ghcide-bench/src/Experiments.hs b/ghcide-bench/src/Experiments.hs index 8e70483559..29b74716f9 100644 --- a/ghcide-bench/src/Experiments.hs +++ b/ghcide-bench/src/Experiments.hs @@ -396,7 +396,28 @@ examplesPath :: FilePath examplesPath = "bench/example" defConfig :: Config -Success defConfig = execParserPure defaultPrefs (info configP fullDesc) [] +defConfig = Config + { verbosity = Normal + , shakeProfiling = Nothing + , otMemoryProfiling = Nothing + , outputCSV = "results.csv" + , buildTool = Cabal + , ghcideOptions = [] + , matches = [] + , repetitions = Nothing + , ghcide = "ghcide" + , timeoutLsp = 60 + , example = Example + { exampleName = "Cabal" + , exampleDetails = ExampleHackage ExamplePackage + { packageName = "Cabal" + , packageVersion = makeVersion [3,16,1,0] + } + , exampleModules = ["src/Distribution/Simple.hs"] + , exampleExtraArgs = [] + } + , lspConfig = False + } quiet, verbose :: Config -> Bool verbose = (== All) . 
verbosity diff --git a/ghcide/session-loader/Development/IDE/Session.hs b/ghcide/session-loader/Development/IDE/Session.hs index 7e1a062a7a..1b10a68631 100644 --- a/ghcide/session-loader/Development/IDE/Session.hs +++ b/ghcide/session-loader/Development/IDE/Session.hs @@ -905,7 +905,7 @@ session recorder sessionShake sessionState knownTargetsVar(hieYaml, cfp, opts, l -- Typecheck all files in the project on startup unless (null new_components_info || not checkProject) $ do cfps' <- liftIO $ filterM (IO.doesFileExist . fromNormalizedFilePath) (concatMap targetLocations all_targets) - void $ enqueueActions sessionShake $ mkDelayedAction "InitialLoad" Debug $ void $ do + initialLoad <- mkDelayedAction "InitialLoad" Debug $ void $ do mmt <- uses GetModificationTime cfps' let cs_exist = catMaybes (zipWith (<$) cfps' mmt) modIfaces <- uses GetModIface cs_exist @@ -913,6 +913,7 @@ session recorder sessionShake sessionState knownTargetsVar(hieYaml, cfp, opts, l shakeExtras <- getShakeExtras let !exportsMap' = createExportsMap $ mapMaybe (fmap hirModIface) modIfaces liftIO $ atomically $ modifyTVar' (exportsMap shakeExtras) (exportsMap' <>) + void $ enqueueActions sessionShake initialLoad return [keys1, keys2] -- | Create a new HscEnv from a hieYaml root and a set of options diff --git a/ghcide/src/Development/IDE/Core/FileStore.hs b/ghcide/src/Development/IDE/Core/FileStore.hs index 7d253131d6..8fb87d48f5 100644 --- a/ghcide/src/Development/IDE/Core/FileStore.hs +++ b/ghcide/src/Development/IDE/Core/FileStore.hs @@ -286,8 +286,9 @@ setFileModified recorder vfs state saved nfp actionBefore = do typecheckParents recorder state nfp typecheckParents :: Recorder (WithPriority Log) -> IdeState -> NormalizedFilePath -> IO () -typecheckParents recorder state nfp = void $ shakeEnqueue (shakeExtras state) parents - where parents = mkDelayedAction "ParentTC" L.Debug (typecheckParentsAction recorder nfp) +typecheckParents recorder state nfp = do + parents <- mkDelayedAction "ParentTC" 
L.Debug (typecheckParentsAction recorder nfp) + void $ shakeEnqueue (shakeExtras state) parents typecheckParentsAction :: Recorder (WithPriority Log) -> NormalizedFilePath -> Action () typecheckParentsAction recorder nfp = do diff --git a/ghcide/src/Development/IDE/Core/OfInterest.hs b/ghcide/src/Development/IDE/Core/OfInterest.hs index 19e0f40e24..79addaa39a 100644 --- a/ghcide/src/Development/IDE/Core/OfInterest.hs +++ b/ghcide/src/Development/IDE/Core/OfInterest.hs @@ -15,7 +15,7 @@ module Development.IDE.Core.OfInterest( kick, FileOfInterestStatus(..), OfInterestVar(..), scheduleGarbageCollection, - Log(..) + Log(..), doKick ) where import Control.Concurrent.Strict @@ -39,7 +39,7 @@ import Development.IDE.Plugin.Completions.Types import Development.IDE.Types.Exports import Development.IDE.Types.Location import Development.IDE.Types.Options (IdeTesting (..)) -import Development.IDE.Types.Shake (toKey) +import Development.IDE.Types.Shake (toKey, toNoFileKey) import GHC.TypeLits (KnownSymbol) import Ide.Logger (Pretty (pretty), Priority (..), @@ -66,6 +66,10 @@ ofInterestRules :: Recorder (WithPriority Log) -> Rules () ofInterestRules recorder = do addIdeGlobal . OfInterestVar =<< liftIO (newVar HashMap.empty) addIdeGlobal . 
GarbageCollectVar =<< liftIO (newVar False) + -- A no-file rule to perform the global kick action + defineEarlyCutOffNoFile (cmapWithPrio LogShake recorder) $ \Kick -> do + kick + pure ("", ()) defineEarlyCutoff (cmapWithPrio LogShake recorder) $ RuleNoDiagnostics $ \IsFileOfInterest f -> do alwaysRerun filesOfInterest <- getFilesOfInterestUntracked @@ -113,7 +117,7 @@ addFileOfInterest state f v = do then do logWith (ideLogger state) Debug $ LogSetFilesOfInterest (HashMap.toList files) - return [toKey IsFileOfInterest f] + return [toKey IsFileOfInterest f, toNoFileKey Kick] else return [] deleteFileOfInterest :: IdeState -> NormalizedFilePath -> IO [Key] @@ -122,12 +126,21 @@ deleteFileOfInterest state f = do files <- modifyVar' var $ HashMap.delete f logWith (ideLogger state) Debug $ LogSetFilesOfInterest (HashMap.toList files) - return [toKey IsFileOfInterest f] + return [toKey IsFileOfInterest f, toNoFileKey Kick] scheduleGarbageCollection :: IdeState -> IO () scheduleGarbageCollection state = do GarbageCollectVar var <- getIdeGlobalState state writeVar var True +doKick :: Action () +doKick = do + ShakeExtras{ideTesting = IdeTesting testing} <- getShakeExtras + -- only kick always if testing, otherwise we rely on the kick rule + if testing + then kick + else void $ useNoFile Kick + + -- | Typecheck all the files of interest. -- Could be improved kick :: Action () diff --git a/ghcide/src/Development/IDE/Core/PluginUtils.hs b/ghcide/src/Development/IDE/Core/PluginUtils.hs index 2b5caf8ff0..398bfbf3bd 100644 --- a/ghcide/src/Development/IDE/Core/PluginUtils.hs +++ b/ghcide/src/Development/IDE/Core/PluginUtils.hs @@ -71,14 +71,16 @@ import qualified StmContainers.Map as STM -- |ExceptT version of `runAction`, takes a ExceptT Action runActionE :: MonadIO m => String -> IdeState -> ExceptT e Action a -> ExceptT e m a runActionE herald ide act = - mapExceptT liftIO . 
ExceptT $ - join $ shakeEnqueue (shakeExtras ide) (mkDelayedAction herald Logger.Debug $ runExceptT act) + mapExceptT liftIO . ExceptT $ do + delayed <- mkDelayedAction herald Logger.Debug $ runExceptT act + join $ shakeEnqueue (shakeExtras ide) delayed -- |MaybeT version of `runAction`, takes a MaybeT Action runActionMT :: MonadIO m => String -> IdeState -> MaybeT Action a -> MaybeT m a runActionMT herald ide act = - mapMaybeT liftIO . MaybeT $ - join $ shakeEnqueue (shakeExtras ide) (mkDelayedAction herald Logger.Debug $ runMaybeT act) + mapMaybeT liftIO . MaybeT $ do + delayed <- mkDelayedAction herald Logger.Debug $ runMaybeT act + join $ shakeEnqueue (shakeExtras ide) delayed -- |ExceptT version of `use` that throws a PluginRuleFailed upon failure useE :: IdeRule k v => k -> NormalizedFilePath -> ExceptT PluginError Action v diff --git a/ghcide/src/Development/IDE/Core/RuleTypes.hs b/ghcide/src/Development/IDE/Core/RuleTypes.hs index e10c26e953..7394cce308 100644 --- a/ghcide/src/Development/IDE/Core/RuleTypes.hs +++ b/ghcide/src/Development/IDE/Core/RuleTypes.hs @@ -518,6 +518,14 @@ data IsFileOfInterest = IsFileOfInterest instance Hashable IsFileOfInterest instance NFData IsFileOfInterest +-- | A no-file rule that triggers the IDE "kick" action +data Kick = Kick + deriving (Eq, Show, Generic) +instance Hashable Kick +instance NFData Kick + +type instance RuleResult Kick = () + data GetModSummaryWithoutTimestamps = GetModSummaryWithoutTimestamps deriving (Eq, Show, Generic) instance Hashable GetModSummaryWithoutTimestamps diff --git a/ghcide/src/Development/IDE/Core/Service.hs b/ghcide/src/Development/IDE/Core/Service.hs index 3d98833ab2..aefb8ffdde 100644 --- a/ghcide/src/Development/IDE/Core/Service.hs +++ b/ghcide/src/Development/IDE/Core/Service.hs @@ -108,5 +108,6 @@ shutdown = shakeShut -- available. There might still be other rules running at this point, -- e.g., the ofInterestRule. 
runAction :: String -> IdeState -> Action a -> IO a -runAction herald ide act = - join $ shakeEnqueue (shakeExtras ide) (mkDelayedAction herald Logger.Debug act) +runAction herald ide act = do + delayed <- mkDelayedAction herald Logger.Debug act + join $ shakeEnqueue (shakeExtras ide) delayed diff --git a/ghcide/src/Development/IDE/Core/Shake.hs b/ghcide/src/Development/IDE/Core/Shake.hs index 92bac9321c..5537a1b8b4 100644 --- a/ghcide/src/Development/IDE/Core/Shake.hs +++ b/ghcide/src/Development/IDE/Core/Shake.hs @@ -108,8 +108,7 @@ import Data.Hashable import qualified Data.HashMap.Strict as HMap import Data.HashSet (HashSet) import qualified Data.HashSet as HSet -import Data.List.Extra (foldl', partition, - takeEnd) +import Data.List.Extra (partition, takeEnd) import qualified Data.Map.Strict as Map import Data.Maybe import qualified Data.SortedList as SL @@ -119,7 +118,6 @@ import Data.Time import Data.Traversable import Data.Tuple.Extra import Data.Typeable -import Data.Unique import Data.Vector (Vector) import qualified Data.Vector as Vector import Development.IDE.Core.Debouncer @@ -147,12 +145,21 @@ import Development.IDE.GHC.Orphans () import Development.IDE.Graph hiding (ShakeValue, action) import qualified Development.IDE.Graph as Shake -import Development.IDE.Graph.Database (ShakeDatabase, +import Development.IDE.Graph.Database (RuntimeRestartKeys (..), + ShakeDatabase, + instantiateDelayedAction, + shakeComputeToPreserve, + shakeGetActionQueueLength, shakeGetBuildStep, shakeGetDatabaseKeys, - shakeNewDatabase, + shakeNewDatabaseWithRuntime, + shakePeekAsyncsDelivers, shakeProfileDatabase, - shakeRunDatabaseForKeys) + shakeRunDatabaseForKeysSep, + shakeShutDatabase) +import qualified Development.IDE.Graph.Database as GraphDatabase +import Development.IDE.Graph.Internal.Action (pumpActionThread) +import qualified Development.IDE.Graph.Internal.Types as GraphRuntime import Development.IDE.Graph.Rule import Development.IDE.Types.Action import 
Development.IDE.Types.Diagnostics @@ -190,7 +197,7 @@ import UnliftIO (MonadUnliftIO (withRunI data Log = LogCreateHieDbExportsMapStart | LogCreateHieDbExportsMapFinish !Int - | LogBuildSessionRestart !String ![DelayedActionInternal] !KeySet !Seconds !(Maybe FilePath) + | LogBuildSessionRestart !String ![DelayedActionInternal] !KeySet !Seconds !(Maybe FilePath) !RuntimeRestartStats | LogBuildSessionRestartTakingTooLong !Seconds | LogDelayedAction !(DelayedAction ()) !Seconds | LogBuildSessionFinish !(Maybe SomeException) @@ -205,17 +212,32 @@ data Log | LogSetFilesOfInterest ![(NormalizedFilePath, FileOfInterestStatus)] deriving Show +data RuntimeRestartStats = RuntimeRestartStats + { runtimeDirtyCount :: !Int + , runtimeAffectedCount :: !Int + , runtimePreservedCount :: !Int + , runtimeActionQueueCount :: !Int + , runtimeSurvivingActions :: ![String] + } + deriving Show + instance Pretty Log where pretty = \case LogCreateHieDbExportsMapStart -> "Initializing exports map from hiedb" LogCreateHieDbExportsMapFinish exportsMapSize -> "Done initializing exports map from hiedb. 
Size:" <+> pretty exportsMapSize - LogBuildSessionRestart reason actionQueue keyBackLog abortDuration shakeProfilePath -> + LogBuildSessionRestart reason actionQueue keyBackLog abortDuration shakeProfilePath RuntimeRestartStats{..} -> vcat [ "Restarting build session due to" <+> pretty reason , "Action Queue:" <+> pretty (map actionName actionQueue) , "Keys:" <+> pretty (map show $ toListKeySet keyBackLog) + , "Runtime:" + <+> "dirty=" <> pretty runtimeDirtyCount + <> ", affected=" <> pretty runtimeAffectedCount + <> ", preserved=" <> pretty runtimePreservedCount + <> ", queued=" <> pretty runtimeActionQueueCount + <> ", surviving=" <> pretty runtimeSurvivingActions , "Aborting previous build session took" <+> pretty (showDuration abortDuration) <+> pretty shakeProfilePath ] LogBuildSessionRestartTakingTooLong seconds -> "Build restart is taking too long (" <> pretty seconds <> " seconds)" @@ -723,7 +745,9 @@ shakeOpen recorder lspEnv defaultConfig idePlugins debouncer vfsVar <- newTVarIO =<< vfsSnapshot lspEnv pure ShakeExtras{shakeRecorder = recorder, ..} shakeDb <- - shakeNewDatabase + shakeNewDatabaseWithRuntime + (const $ pure ()) + (actionQueue shakeExtras) opts { shakeExtra = newShakeExtra shakeExtras } rules shakeSession <- newEmptyMVar @@ -766,7 +790,7 @@ shakeSessionInit recorder IdeState{..} = do -- Take a snapshot of the VFS - it should be empty as we've received no notifications -- till now, but it can't hurt to be in sync with the `lsp` library. vfs <- vfsSnapshot (lspEnv shakeExtras) - initSession <- newSession recorder shakeExtras (VFSModified vfs) shakeDb [] "shakeSessionInit" + initSession <- newSession recorder shakeExtras (VFSModified vfs) shakeDb [] "shakeSessionInit" Nothing putMVar shakeSession initSession logWith recorder Debug LogSessionInitialised @@ -776,6 +800,8 @@ shakeShut IdeState{..} = do -- Shake gets unhappy if you try to close when there is a running -- request so we first abort that. 
for_ runner cancelShakeSession + running <- shakePeekAsyncsDelivers shakeDb + shakeShutDatabase (fromListKeySet $ map GraphRuntime.deliverKey running) shakeDb void $ shakeDatabaseProfile shakeDb progressStop $ progress shakeExtras progressStop $ indexProgressReporting $ hiedbWriter shakeExtras @@ -793,8 +819,12 @@ withMVar' var unmasked masked = uninterruptibleMask $ \restore -> do pure c -mkDelayedAction :: String -> Logger.Priority -> Action a -> DelayedAction a -mkDelayedAction = DelayedAction Nothing +toGraphPriority :: Logger.Priority -> GraphRuntime.Priority +toGraphPriority = toEnum . fromEnum + +mkDelayedAction :: String -> Logger.Priority -> Action a -> IO (DelayedAction a) +mkDelayedAction name priority = + GraphDatabase.mkDelayedAction name (toGraphPriority priority) -- | These actions are run asynchronously after the current action is -- finished running. For example, to trigger a key build after a rule @@ -810,12 +840,15 @@ delayedAction a = do -- but actions added via 'shakeEnqueue' will be requeued. 
shakeRestart :: Recorder (WithPriority Log) -> IdeState -> VFSModified -> String -> [DelayedAction ()] -> IO [Key] -> IO () shakeRestart recorder IdeState{..} vfs reason acts ioActionBetweenShakeSession = - void $ awaitRunInThread (restartQueue shakeExtras) $ do + void $ awaitRunInThread (restartQueue shakeExtras) $ GraphRuntime.withShakeDatabaseValuesLock shakeDb $ do withMVar' shakeSession (\runner -> do - (stopTime,()) <- duration $ logErrorAfter 10 $ cancelShakeSession runner + logErrorAfter 10 $ cancelShakeSession runner keys <- ioActionBetweenShakeSession + IdeOptions{optRunSubset} <- getIdeOptionsIO shakeExtras + (stopTime, (runtimeKeysChanged, runtimeStats)) <- + duration $ prepareRuntimeRestart optRunSubset keys -- it is every important to update the dirty keys after we enter the critical section -- see Note [Housekeeping rule cache and dirty key outside of hls-graph] atomically $ modifyTVar' (dirtyKeys shakeExtras) $ \x -> foldl' (flip insertKeySet) x keys @@ -824,19 +857,55 @@ shakeRestart recorder IdeState{..} vfs reason acts ioActionBetweenShakeSession = queue <- atomicallyNamed "actionQueue - peek" $ peekInProgress $ actionQueue shakeExtras -- this log is required by tests - logWith recorder Debug $ LogBuildSessionRestart reason queue backlog stopTime res + logWith recorder Debug $ LogBuildSessionRestart reason queue backlog stopTime res runtimeStats + return runtimeKeysChanged ) -- It is crucial to be masked here, otherwise we can get killed -- between spawning the new thread and updating shakeSession. 
-- See https://github.com/haskell/ghcide/issues/79 - (\() -> do - (,()) <$> newSession recorder shakeExtras vfs shakeDb acts reason) + (\runtimeKeysChanged -> do + (,()) <$> newSession recorder shakeExtras vfs shakeDb acts reason runtimeKeysChanged) where logErrorAfter :: Seconds -> IO () -> IO () logErrorAfter seconds action = flip withAsync (const action) $ do sleep seconds logWith recorder Error (LogBuildSessionRestartTakingTooLong seconds) + prepareRuntimeRestart :: Bool -> [Key] -> IO (RuntimeKeysChanged, RuntimeRestartStats) + prepareRuntimeRestart optRunSubset keys + | optRunSubset = do + runtimeRestartKeys <- shakeComputeToPreserve shakeDb $ fromListKeySet keys + logErrorAfter 10 $ shakeShutDatabase (restartKillKeys runtimeRestartKeys) shakeDb + surviving <- shakePeekAsyncsDelivers shakeDb + queueCount <- shakeGetActionQueueLength shakeDb + let preserved = fromListKeySet $ map GraphRuntime.deliverKey surviving + pure + ( Just (runtimeRestartKeys, preserved) + , RuntimeRestartStats + { runtimeDirtyCount = length keys + , runtimeAffectedCount = lengthKeySet (restartKillKeys runtimeRestartKeys) + , runtimePreservedCount = lengthKeySet preserved + , runtimeActionQueueCount = queueCount + , runtimeSurvivingActions = map GraphRuntime.deliverName surviving + } + ) + | otherwise = do + running <- shakePeekAsyncsDelivers shakeDb + let affected = fromListKeySet $ map GraphRuntime.deliverKey running + logErrorAfter 10 $ shakeShutDatabase affected shakeDb + surviving <- shakePeekAsyncsDelivers shakeDb + queueCount <- shakeGetActionQueueLength shakeDb + pure + ( Nothing + , RuntimeRestartStats + { runtimeDirtyCount = length keys + , runtimeAffectedCount = lengthKeySet affected + , runtimePreservedCount = length surviving + , runtimeActionQueueCount = queueCount + , runtimeSurvivingActions = map GraphRuntime.deliverName surviving + } + ) + -- | Enqueue an action in the existing 'ShakeSession'. 
-- Returns a computation to block until the action is run, propagating exceptions. -- Assumes a 'ShakeSession' is available. @@ -861,6 +930,8 @@ shakeEnqueue ShakeExtras{actionQueue, shakeRecorder} act = do data VFSModified = VFSUnmodified | VFSModified !VFS +type RuntimeKeysChanged = Maybe (RuntimeRestartKeys, KeySet) + -- | Set up a new 'ShakeSession' with a set of initial actions -- Will crash if there is an existing 'ShakeSession' running. newSession @@ -870,44 +941,30 @@ newSession -> ShakeDatabase -> [DelayedActionInternal] -> String + -> RuntimeKeysChanged -> IO ShakeSession -newSession recorder extras@ShakeExtras{..} vfsMod shakeDb acts reason = do +newSession recorder ShakeExtras{..} vfsMod shakeDb acts reason runtimeKeysChanged = do -- Take a new VFS snapshot case vfsMod of VFSUnmodified -> pure () VFSModified vfs -> atomically $ writeTVar vfsVar vfs - IdeOptions{optRunSubset} <- getIdeOptionsIO extras + unless (null acts) $ + atomicallyNamed "actionQueue - push initial" $ + mapM_ (`pushQueue` actionQueue) acts reenqueued <- atomicallyNamed "actionQueue - peek" $ peekInProgress actionQueue - allPendingKeys <- - if optRunSubset - then Just <$> readTVarIO dirtyKeys - else return Nothing + startDatabase <- shakeRunDatabaseForKeysSep runtimeKeysChanged shakeDb [pumpActionThread shakeDb (const $ pure ())] let - -- A daemon-like action used to inject additional work - -- Runs actions from the work queue sequentially - pumpActionThread otSpan = do - d <- liftIO $ atomicallyNamed "action queue - pop" $ popQueue actionQueue - actionFork (run otSpan d) $ \_ -> pumpActionThread otSpan - - -- TODO figure out how to thread the otSpan into defineEarlyCutoff - run _otSpan d = do - start <- liftIO offsetTime - getAction d - liftIO $ atomicallyNamed "actionQueue - done" $ doneQueue d actionQueue - runTime <- liftIO start - logWith recorder (actionPriority d) $ LogDelayedAction d runTime - -- The inferred type signature doesn't work in ghc >= 9.0.1 workRun :: (forall b. 
IO b -> IO b) -> IO (IO ()) workRun restore = withSpan "Shake session" $ \otSpan -> do setTag otSpan "reason" (fromString reason) setTag otSpan "queue" (fromString $ unlines $ map actionName reenqueued) - whenJust allPendingKeys $ \kk -> setTag otSpan "keys" (BS8.pack $ unlines $ map show $ toListKeySet kk) - let keysActs = pumpActionThread otSpan : map (run otSpan) (reenqueued ++ acts) + whenJust runtimeKeysChanged $ \(runtimeRestartKeys, _) -> + setTag otSpan "keys" (BS8.pack $ unlines $ map show $ restartDirtyKeys runtimeRestartKeys) res <- try @SomeException $ - restore $ shakeRunDatabaseForKeys (toListKeySet <$> allPendingKeys) shakeDb keysActs + restore startDatabase return $ do let exception = case res of @@ -931,24 +988,6 @@ newSession recorder extras@ShakeExtras{..} vfsMod shakeDb acts reason = do pure (ShakeSession{..}) -instantiateDelayedAction - :: DelayedAction a - -> IO (Barrier (Either SomeException a), DelayedActionInternal) -instantiateDelayedAction (DelayedAction _ s p a) = do - u <- newUnique - b <- newBarrier - let a' = do - -- work gets reenqueued when the Shake session is restarted - -- it can happen that a work item finished just as it was reenqueued - -- in that case, skipping the work is fine - alreadyDone <- liftIO $ isJust <$> waitBarrierMaybe b - unless alreadyDone $ do - x <- actionCatch @SomeException (Right <$> a) (pure . Left) - -- ignore exceptions if the barrier has been filled concurrently - liftIO $ void $ try @SomeException $ signalBarrier b x - d' = DelayedAction (Just u) s p a' - return (b, d') - getDiagnostics :: IdeState -> STM [FileDiagnostic] getDiagnostics IdeState{shakeExtras = ShakeExtras{diagnostics}} = do getAllDiagnostics diagnostics @@ -1106,7 +1145,7 @@ useWithStaleFast' key file = do -- Async trigger the key to be built anyway because we want to -- keep updating the value in the key. 
- waitValue <- delayedAction $ mkDelayedAction ("C:" ++ show key ++ ":" ++ fromNormalizedFilePath file) Debug $ use key file + waitValue <- delayedAction =<< liftIO (mkDelayedAction ("C:" ++ show key ++ ":" ++ fromNormalizedFilePath file) Debug $ use key file) s@ShakeExtras{state} <- askShake r <- liftIO $ atomicallyNamed "useStateFast" $ getValues state key file diff --git a/ghcide/src/Development/IDE/Main.hs b/ghcide/src/Development/IDE/Main.hs index feb0050a79..b54f37b32b 100644 --- a/ghcide/src/Development/IDE/Main.hs +++ b/ghcide/src/Development/IDE/Main.hs @@ -40,7 +40,7 @@ import Development.IDE.Core.IdeConfiguration (IdeConfiguration (..) modifyClientSettings, registerIdeConfiguration) import Development.IDE.Core.OfInterest (FileOfInterestStatus (OnDisk), - kick, + doKick, setFilesOfInterest) import Development.IDE.Core.Rules (mainRule) import qualified Development.IDE.Core.Rules as Rules @@ -304,7 +304,7 @@ defaultMain recorder Arguments{..} = withHeapStats (cmapWithPrio LogHeapStats re argsParseConfig = getConfigFromNotification argsHlsPlugins rules = do argsRules - unless argsDisableKick $ action kick + unless argsDisableKick $ action $ doKick pluginRules plugins -- install the main and ghcide-plugin rules -- install the kick action, which triggers a typecheck on every diff --git a/ghcide/src/Development/IDE/Types/Action.hs b/ghcide/src/Development/IDE/Types/Action.hs index 0aedd1d0da..906a0be9e7 100644 --- a/ghcide/src/Development/IDE/Types/Action.hs +++ b/ghcide/src/Development/IDE/Types/Action.hs @@ -1,88 +1,31 @@ module Development.IDE.Types.Action - ( DelayedAction (..), - DelayedActionInternal, - ActionQueue, - newQueue, - pushQueue, - popQueue, - doneQueue, - peekInProgress, - abortQueue,countQueue) + ( Action + , DelayedAction (..) 
+ , DelayedActionInternal + , ActionQueue + , newQueue + , pushQueue + , popQueue + , doneQueue + , peekInProgress + , abortQueue + , countQueue + , isActionQueueEmpty + , unGetQueue + , countInProgress + ) where import Control.Concurrent.STM -import Data.Hashable (Hashable (..)) -import Data.HashSet (HashSet) -import qualified Data.HashSet as Set -import Data.Unique (Unique) -import Development.IDE.Graph (Action) -import Ide.Logger -import Numeric.Natural - -data DelayedAction a = DelayedAction - { uniqueID :: Maybe Unique, - -- | Name we use for debugging - actionName :: String, - -- | Priority with which to log the action - actionPriority :: Priority, - -- | The payload - getAction :: Action a - } - deriving (Functor) - -type DelayedActionInternal = DelayedAction () - -instance Eq (DelayedAction a) where - a == b = uniqueID a == uniqueID b - -instance Hashable (DelayedAction a) where - hashWithSalt s = hashWithSalt s . uniqueID - -instance Show (DelayedAction a) where - show d = "DelayedAction: " ++ actionName d - ------------------------------------------------------------------------------- - -data ActionQueue = ActionQueue - { newActions :: TQueue DelayedActionInternal, - inProgress :: TVar (HashSet DelayedActionInternal) - } - -newQueue :: IO ActionQueue -newQueue = atomically $ do - newActions <- newTQueue - inProgress <- newTVar mempty - return ActionQueue {..} - -pushQueue :: DelayedActionInternal -> ActionQueue -> STM () -pushQueue act ActionQueue {..} = writeTQueue newActions act - --- | You must call 'doneQueue' to signal completion -popQueue :: ActionQueue -> STM DelayedActionInternal -popQueue ActionQueue {..} = do - x <- readTQueue newActions - modifyTVar inProgress (Set.insert x) - return x - --- | Completely remove an action from the queue -abortQueue :: DelayedActionInternal -> ActionQueue -> STM () -abortQueue x ActionQueue {..} = do - qq <- flushTQueue newActions - mapM_ (writeTQueue newActions) (filter (/= x) qq) - modifyTVar' inProgress 
(Set.delete x) - --- | Mark an action as complete when called after 'popQueue'. --- Has no effect otherwise -doneQueue :: DelayedActionInternal -> ActionQueue -> STM () -doneQueue x ActionQueue {..} = do - modifyTVar' inProgress (Set.delete x) - -countQueue :: ActionQueue -> STM Natural -countQueue ActionQueue{..} = do - backlog <- flushTQueue newActions - mapM_ (writeTQueue newActions) backlog - m <- Set.size <$> readTVar inProgress - return $ fromIntegral $ length backlog + m - -peekInProgress :: ActionQueue -> STM [DelayedActionInternal] -peekInProgress ActionQueue {..} = Set.toList <$> readTVar inProgress +import Development.IDE.Graph.Internal.Types (Action, ActionQueue, + DelayedAction (..), + DelayedActionInternal, + abortQueue, countQueue, + doneQueue, + isActionQueueEmpty, + newQueue, peekInProgress, + popQueue, pushQueue, + unGetQueue) + +countInProgress :: ActionQueue -> STM Int +countInProgress queue = length <$> peekInProgress queue diff --git a/ghcide/src/Development/IDE/Types/Shake.hs b/ghcide/src/Development/IDE/Types/Shake.hs index cc8f84e3b6..a14d2b575b 100644 --- a/ghcide/src/Development/IDE/Types/Shake.hs +++ b/ghcide/src/Development/IDE/Types/Shake.hs @@ -82,6 +82,7 @@ fromKey :: Typeable k => Key -> Maybe (k, NormalizedFilePath) fromKey (Key k) | Just (Q (k', f)) <- cast k = Just (k', f) | otherwise = Nothing +fromKey _ = Nothing -- | fromKeyType (Q (k,f)) = (typeOf k, f) fromKeyType :: Key -> Maybe (SomeTypeRep, NormalizedFilePath) @@ -91,6 +92,7 @@ fromKeyType (Key k) , Q (_, f) <- k = Just (SomeTypeRep a, f) | otherwise = Nothing +fromKeyType _ = Nothing toNoFileKey :: (Show k, Typeable k, Eq k, Hashable k) => k -> Key toNoFileKey k = newKey $ Q (k, emptyFilePath) diff --git a/hls-graph/hls-graph.cabal b/hls-graph/hls-graph.cabal index 52bec5beac..024e44436b 100644 --- a/hls-graph/hls-graph.cabal +++ b/hls-graph/hls-graph.cabal @@ -70,6 +70,7 @@ library autogen-modules: Paths_hls_graph hs-source-dirs: src build-depends: + , mtl ^>=2.3.1 , 
aeson , async >=2.0 , base >=4.12 && <5 @@ -92,6 +93,7 @@ library , transformers , unliftio , unordered-containers + , prettyprinter if flag(embed-files) cpp-options: -DFILE_EMBED diff --git a/hls-graph/src/Development/IDE/Graph.hs b/hls-graph/src/Development/IDE/Graph.hs index 81ad3b3dfd..915da203aa 100644 --- a/hls-graph/src/Development/IDE/Graph.hs +++ b/hls-graph/src/Development/IDE/Graph.hs @@ -18,6 +18,7 @@ module Development.IDE.Graph( -- * Actions for inspecting the keys in the database getDirtySet, getKeysAndVisitedAge, + module Development.IDE.Graph.KeyMap, module Development.IDE.Graph.KeySet, ) where diff --git a/hls-graph/src/Development/IDE/Graph/Database.hs b/hls-graph/src/Development/IDE/Graph/Database.hs index bd8601cd16..9dbe51814d 100644 --- a/hls-graph/src/Development/IDE/Graph/Database.hs +++ b/hls-graph/src/Development/IDE/Graph/Database.hs @@ -2,17 +2,40 @@ module Development.IDE.Graph.Database( ShakeDatabase, ShakeValue, shakeNewDatabase, + shakeNewDatabaseWithRuntime, shakeRunDatabase, shakeRunDatabaseForKeys, + shakeRunDatabaseWithExceptions, + shakeRunDatabaseForKeysWithExceptions, + shakeRunDatabaseForKeysSep, shakeProfileDatabase, shakeGetBuildStep, shakeGetDatabaseKeys, shakeGetDirtySet, shakeGetCleanKeys - ,shakeGetBuildEdges) where -import Control.Concurrent.STM.Stats (readTVarIO) + ,shakeGetBuildEdges, + shakeShutDatabase, + shakeGetActionQueueLength, + RuntimeRestartKeys(..), + shakeComputeToPreserve, + -- shakedatabaseRuntimeDep, + shakePeekAsyncsDelivers, + instantiateDelayedAction, + mkDelayedAction, + shakeDatabaseSize) where +import Control.Concurrent.Extra (Barrier, newBarrier, + signalBarrier, + waitBarrierMaybe) +import Control.Concurrent.STM.Stats (atomically, + atomicallyNamed, + readTVarIO) +import Control.Exception (SomeException, + throwIO, try) +import Control.Monad (join, unless, void) +import Control.Monad.IO.Class (liftIO) import Data.Dynamic import Data.Maybe +import Data.Unique import Development.IDE.Graph.Classes 
() import Development.IDE.Graph.Internal.Action import Development.IDE.Graph.Internal.Database @@ -21,20 +44,33 @@ import Development.IDE.Graph.Internal.Options import Development.IDE.Graph.Internal.Profile (writeProfile) import Development.IDE.Graph.Internal.Rules import Development.IDE.Graph.Internal.Types +import qualified Development.IDE.Graph.Internal.Types as Logger +import qualified StmContainers.Map as SMap -- Placeholder to be the 'extra' if the user doesn't set it data NonExportedType = NonExportedType +shakeShutDatabase :: KeySet -> ShakeDatabase -> IO () +shakeShutDatabase dirties (ShakeDatabase _ _ db) = shutDatabase dirties db + shakeNewDatabase :: ShakeOptions -> Rules () -> IO ShakeDatabase shakeNewDatabase opts rules = do + aq <- newQueue + shakeNewDatabaseWithRuntime (const $ pure ()) aq opts rules + +shakeNewDatabaseWithRuntime :: (String -> IO ()) -> ActionQueue -> ShakeOptions -> Rules () -> IO ShakeDatabase +shakeNewDatabaseWithRuntime l aq opts rules = do let extra = fromMaybe (toDyn NonExportedType) $ shakeExtra opts (theRules, actions) <- runRules extra rules - db <- newDatabase extra theRules + db <- newDatabase l aq extra theRules pure $ ShakeDatabase (length actions) actions db shakeRunDatabase :: ShakeDatabase -> [Action a] -> IO [a] -shakeRunDatabase = shakeRunDatabaseForKeys Nothing +shakeRunDatabase s xs = shakeRunDatabaseForKeys Nothing s xs + +shakeRunDatabaseWithExceptions :: ShakeDatabase -> [Action a] -> IO [Either SomeException a] +shakeRunDatabaseWithExceptions s xs = shakeRunDatabaseForKeysWithExceptions Nothing s xs -- | Returns the set of dirty keys annotated with their age (in # of builds) shakeGetDirtySet :: ShakeDatabase -> IO [(Key, Int)] @@ -52,19 +88,85 @@ unvoid :: Functor m => m () -> m a unvoid = fmap undefined -- | Assumes that the database is not running a build +-- The nested IO is to +-- separate incrementing the step from running the build. 
+-- Also immediately enqueues upsweep actions for the newly dirty keys. +shakeRunDatabaseForKeysSep + :: Maybe (RuntimeRestartKeys, KeySet) -- ^ Set of keys changed since last run. 'Nothing' means everything has changed + -> ShakeDatabase + -> [Action a] + -> IO (IO [Either SomeException a]) +shakeRunDatabaseForKeysSep keysChanged sdb@(ShakeDatabase _ as1 db) acts = do + preserves <- incDatabase db keysChanged + reenqueued <- atomicallyNamed "actionQueue - peek" $ peekInProgress (databaseActionQueue db) + let reenqueuedExceptPreserves = filter (\d -> uniqueID d `notMemberKeySet` preserves) reenqueued + let ignoreResultActs = as1 + return $ do + seqRunActions (newKey "root") db $ map (pumpActionThreadReRun sdb) reenqueuedExceptPreserves + drop (length ignoreResultActs) <$> runActions (newKey "root") db (map unvoid ignoreResultActs ++ acts) + +instantiateDelayedAction + :: DelayedAction a + -> IO (Barrier (Either SomeException a), DelayedActionInternal) +instantiateDelayedAction (DelayedAction u s p a) = do + b <- newBarrier + let a' = do + -- work gets reenqueued when the Shake session is restarted + -- it can happen that a work item finished just as it was reenqueued + -- in that case, skipping the work is fine + alreadyDone <- liftIO $ isJust <$> waitBarrierMaybe b + unless alreadyDone $ do + x <- actionCatch @SomeException (Right <$> a) (pure . 
Left) + -- ignore exceptions if the barrier has been filled concurrently + liftIO $ void $ try @SomeException $ signalBarrier b x + d' = DelayedAction u s p a' + return (b, d') + +mkDelayedAction :: String -> Logger.Priority -> Action a -> IO (DelayedAction a) +mkDelayedAction s p a = do + u <- newUnique + return $ DelayedAction (newDirectKey $ hashUnique u) s (toEnum (fromEnum p)) a + +shakeComputeToPreserve :: ShakeDatabase -> KeySet -> IO RuntimeRestartKeys +shakeComputeToPreserve (ShakeDatabase _ _ db) ks = atomically (computeToPreserve db ks) + shakeRunDatabaseForKeys :: Maybe [Key] -- ^ Set of keys changed since last run. 'Nothing' means everything has changed -> ShakeDatabase -> [Action a] -> IO [a] -shakeRunDatabaseForKeys keysChanged (ShakeDatabase lenAs1 as1 db) as2 = do - incDatabase db keysChanged - fmap (drop lenAs1) $ runActions db $ map unvoid as1 ++ as2 +shakeRunDatabaseForKeys keysChanged sdb as2 = + shakeRunDatabaseForKeysWithExceptions keysChanged sdb as2 >>= traverse (either throwIO pure) + +shakeRunDatabaseForKeysWithExceptions + :: Maybe [Key] + -- ^ Set of keys changed since last run. 
'Nothing' means everything has changed + -> ShakeDatabase + -> [Action a] + -> IO [Either SomeException a] +shakeRunDatabaseForKeysWithExceptions Nothing sdb as2 = join $ shakeRunDatabaseForKeysSep Nothing sdb as2 +shakeRunDatabaseForKeysWithExceptions (Just x) sdb as2 = + let y = fromListKeySet x + restartKeys = RuntimeRestartKeys + { restartKillKeys = y + , restartDirtyKeys = toListKeySet y + } + in join $ shakeRunDatabaseForKeysSep (Just (restartKeys, y)) sdb as2 + + +shakePeekAsyncsDelivers :: ShakeDatabase -> IO [DeliverStatus] +shakePeekAsyncsDelivers (ShakeDatabase _ _ db) = peekAsyncsDelivers db + +shakeDatabaseSize :: ShakeDatabase -> IO Int +shakeDatabaseSize (ShakeDatabase _ _ db) = databaseSize db + +databaseSize :: Database -> IO Int +databaseSize db = atomically $ SMap.size $ databaseValues db -- | Given a 'ShakeDatabase', write an HTML profile to the given file about the latest run. shakeProfileDatabase :: ShakeDatabase -> FilePath -> IO () -shakeProfileDatabase (ShakeDatabase _ _ s) file = writeProfile file s +shakeProfileDatabase (ShakeDatabase _ _ db) file = writeProfile file db -- | Returns the clean keys in the database shakeGetCleanKeys :: ShakeDatabase -> IO [(Key, Result )] @@ -83,3 +185,7 @@ shakeGetBuildEdges (ShakeDatabase _ _ db) = do -- annotated with how long ago (in # builds) they were visited shakeGetDatabaseKeys :: ShakeDatabase -> IO [(Key, Int)] shakeGetDatabaseKeys (ShakeDatabase _ _ db) = getKeysAndVisitAge db + +shakeGetActionQueueLength :: ShakeDatabase -> IO Int +shakeGetActionQueueLength (ShakeDatabase _ _ db) = do + fromIntegral <$> atomically (countQueue (databaseActionQueue db)) diff --git a/hls-graph/src/Development/IDE/Graph/Internal/Action.hs b/hls-graph/src/Development/IDE/Graph/Internal/Action.hs index 6d47d9b511..4db7899b80 100644 --- a/hls-graph/src/Development/IDE/Graph/Internal/Action.hs +++ b/hls-graph/src/Development/IDE/Graph/Internal/Action.hs @@ -14,14 +14,22 @@ module Development.IDE.Graph.Internal.Action , 
runActions , Development.IDE.Graph.Internal.Action.getDirtySet , getKeysAndVisitedAge +, isAsyncException +, pumpActionThread +, pumpActionThreadReRun +, sequenceRun +, seqRunActions ) where import Control.Concurrent.Async +import Control.Concurrent.STM.Stats (atomicallyNamed) import Control.DeepSeq (force) import Control.Exception +import Control.Monad (void) import Control.Monad.IO.Class +import Control.Monad.RWS (MonadReader (ask), + asks) import Control.Monad.Trans.Class -import Control.Monad.Trans.Reader import Data.Foldable (toList) import Data.Functor.Identity import Data.IORef @@ -31,66 +39,91 @@ import Development.IDE.Graph.Internal.Key import Development.IDE.Graph.Internal.Rules (RuleResult) import Development.IDE.Graph.Internal.Types import System.Exit +import UnliftIO (atomically) type ShakeValue a = (Show a, Typeable a, Eq a, Hashable a, NFData a) -- | Always rerun this rule when dirty, regardless of the dependencies. alwaysRerun :: Action () alwaysRerun = do - ref <- Action $ asks actionDeps + ref <- asks actionDeps liftIO $ modifyIORef' ref (AlwaysRerunDeps mempty <>) -parallel :: [Action a] -> Action [a] -parallel [] = pure [] -parallel [x] = fmap (:[]) x +-- parallel :: [Action a] -> Action [Either SomeException a] +-- parallel [] = return [] +-- parallel xs = do +-- a <- ask +-- deps <- liftIO $ readIORef $ actionDeps a +-- case deps of +-- UnknownDeps -> +-- -- if we are already in the rerun mode, nothing we do is going to impact our state +-- -- runActionInDb "parallel" xs +-- runActionInDb "parallel" xs +-- deps -> error $ "parallel not supported when we have precise dependencies: " ++ show deps + +parallel :: [Action a] -> Action [Either SomeException a] +parallel [] = return [] parallel xs = do - a <- Action ask + a <- ask deps <- liftIO $ readIORef $ actionDeps a case deps of - UnknownDeps -> + UnknownDeps -> do -- if we are already in the rerun mode, nothing we do is going to impact our state - liftIO $ mapConcurrently (ignoreState a) xs - 
deps -> do - (newDeps, res) <- liftIO $ unzip <$> mapConcurrently (usingState a) xs - liftIO $ writeIORef (actionDeps a) $ mconcat $ deps : newDeps - pure res - where - usingState a x = do - ref <- newIORef mempty - res <- runReaderT (fromAction x) a{actionDeps=ref} - deps <- readIORef ref - pure (deps, res) + -- runActionInDb "parallel" xs + liftIO $ mapConcurrently (fmap Right . ignoreState a) xs + deps -> error $ "parallel not supported when we have precise dependencies: " ++ show deps + +pumpActionThreadReRun :: ShakeDatabase -> DelayedAction () -> Action () +pumpActionThreadReRun (ShakeDatabase _ _ db) d = do + a <- ask + s <- atomically $ getDataBaseStepInt db + liftIO $ runInThreadStmInNewThreads db + (DeliverStatus s (actionName d) (uniqueID d)) + (ignoreState a $ runOne d) (const $ return ()) + where + runOne d = setActionKey (uniqueID d) $ do + _ <- getAction d + liftIO $ atomically $ doneQueue d (databaseActionQueue db) + +pumpActionThread :: ShakeDatabase -> (String -> IO ()) -> Action b +pumpActionThread sdb@(ShakeDatabase _ _ db) logMsg = do + do + d <- liftIO $ atomicallyNamed "action queue - pop" $ popQueue (databaseActionQueue db) + pumpActionThreadReRun sdb d + pumpActionThread sdb logMsg ignoreState :: SAction -> Action b -> IO b ignoreState a x = do ref <- newIORef mempty - runReaderT (fromAction x) a{actionDeps=ref} + runActionMonad x a{actionDeps=ref} actionFork :: Action a -> (Async a -> Action b) -> Action b actionFork act k = do - a <- Action ask + a <- ask deps <- liftIO $ readIORef $ actionDeps a let db = actionDatabase a case deps of UnknownDeps -> do - -- if we are already in the rerun mode, nothing we do is going to impact our state - [res] <- liftIO $ withAsync (ignoreState a act) $ \as -> runActions db [k as] - return res + [res] <- liftIO $ withAsync (ignoreState a act) $ \as -> + runActions (actionKey a) db [k as] + liftIO $ either throwIO pure res _ -> - error "please help me" + error "actionFork is only supported when 
dependencies are unknown" isAsyncException :: SomeException -> Bool isAsyncException e + | Just (_ :: SomeAsyncException) <- fromException e = True | Just (_ :: AsyncCancelled) <- fromException e = True | Just (_ :: AsyncException) <- fromException e = True + | Just (_ :: AsyncParentKill) <- fromException e = True | Just (_ :: ExitCode) <- fromException e = True | otherwise = False actionCatch :: Exception e => Action a -> (e -> Action a) -> Action a actionCatch a b = do - v <- Action ask - Action $ lift $ catchJust f (runReaderT (fromAction a) v) (\x -> runReaderT (fromAction (b x)) v) + v <- ask + liftIO $ catchJust f (runActionMonad a v) (\x -> runActionMonad (b x) v) where -- Catch only catches exceptions that were caused by this code, not those that -- are a result of program termination @@ -99,23 +132,24 @@ actionCatch a b = do actionBracket :: IO a -> (a -> IO b) -> (a -> Action c) -> Action c actionBracket a b c = do - v <- Action ask - Action $ lift $ bracket a b (\x -> runReaderT (fromAction (c x)) v) + v <- ask + liftIO $ bracket a b (\x -> runActionMonad (c x) v) actionFinally :: Action a -> IO b -> Action a actionFinally a b = do v <- Action ask - Action $ lift $ finally (runReaderT (fromAction a) v) b + Action $ lift $ finally (runActionMonad a v) b apply1 :: (RuleResult key ~ value, ShakeValue key, Typeable value) => key -> Action value apply1 k = runIdentity <$> apply (Identity k) apply :: (Traversable f, RuleResult key ~ value, ShakeValue key, Typeable value) => f key -> Action (f value) apply ks = do - db <- Action $ asks actionDatabase - stack <- Action $ asks actionStack - (is, vs) <- liftIO $ build db stack ks - ref <- Action $ asks actionDeps + db <- asks actionDatabase + stack <- asks actionStack + pk <- getActionKey + (is, vs) <- liftIO $ build pk db stack ks + ref <- asks actionDeps let !ks = force $ fromListKeySet $ toList is liftIO $ modifyIORef' ref (ResultDeps [ks] <>) pure vs @@ -123,15 +157,27 @@ apply ks = do -- | Evaluate a list of 
keys without recording any dependencies. applyWithoutDependency :: (Traversable f, RuleResult key ~ value, ShakeValue key, Typeable value) => f key -> Action (f value) applyWithoutDependency ks = do - db <- Action $ asks actionDatabase - stack <- Action $ asks actionStack - (_, vs) <- liftIO $ build db stack ks + db <- asks actionDatabase + stack <- asks actionStack + pk <- getActionKey + (_, vs) <- liftIO $ build pk db stack ks pure vs -runActions :: Database -> [Action a] -> IO [a] -runActions db xs = do +runActions :: Key -> Database -> [Action a] -> IO [Either SomeException a] +runActions pk db xs = do + deps <- newIORef mempty + runActionMonad (parallel xs) $ SAction pk db deps emptyStack + +seqRunActions :: Key -> Database -> [Action a] -> IO () +seqRunActions pk db xs = do deps <- newIORef mempty - runReaderT (fromAction $ parallel xs) $ SAction db deps emptyStack + runActionMonad (sequenceRun xs) $ SAction pk db deps emptyStack + +sequenceRun :: [Action a] -> Action () +sequenceRun [] = return () +sequenceRun (x:xs) = do + void x + sequenceRun xs -- | Returns the set of dirty keys annotated with their age (in # of builds) getDirtySet :: Action [(Key, Int)] diff --git a/hls-graph/src/Development/IDE/Graph/Internal/Database.hs b/hls-graph/src/Development/IDE/Graph/Internal/Database.hs index 359e5ceb6a..5318beda6e 100644 --- a/hls-graph/src/Development/IDE/Graph/Internal/Database.hs +++ b/hls-graph/src/Development/IDE/Graph/Internal/Database.hs @@ -5,42 +5,42 @@ {-# LANGUAGE CPP #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE LambdaCase #-} +{-# LANGUAGE RankNTypes #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE TypeFamilies #-} -module Development.IDE.Graph.Internal.Database (compute, newDatabase, incDatabase, build, getDirtySet, getKeysAndVisitAge) where +module Development.IDE.Graph.Internal.Database (compute, newDatabase, incDatabase, build, getDirtySet, getKeysAndVisitAge, AsyncParentKill(..), RuntimeRestartKeys(..), computeToPreserve, 
getRunTimeRDeps, spawnAsyncWithDbRegistration) where import Prelude hiding (unzip) -import Control.Concurrent.Async -import Control.Concurrent.Extra -import Control.Concurrent.STM.Stats (STM, atomically, - atomicallyNamed, +import Control.Concurrent.STM.Stats (STM, atomicallyNamed, modifyTVar', newTVarIO, - readTVarIO) + readTVar, readTVarIO, + retry) import Control.Exception import Control.Monad import Control.Monad.IO.Class (MonadIO (liftIO)) -import Control.Monad.Trans.Class (lift) import Control.Monad.Trans.Reader -import qualified Control.Monad.Trans.State.Strict as State import Data.Dynamic -import Data.Either -import Data.Foldable (for_, traverse_) +import Data.Foldable (foldrM) import Data.IORef.Extra import Data.Maybe import Data.Traversable (for) import Data.Tuple.Extra -import Debug.Trace (traceM) import Development.IDE.Graph.Classes import Development.IDE.Graph.Internal.Key import Development.IDE.Graph.Internal.Rules import Development.IDE.Graph.Internal.Types +import Development.IDE.Graph.Internal.Types () import qualified Focus import qualified ListT import qualified StmContainers.Map as SMap -import System.IO.Unsafe -import System.Time.Extra (duration, sleep) +import System.Time.Extra (duration) +import UnliftIO (MVar, atomically, + newEmptyMVar, putMVar, + readMVar) + +import qualified UnliftIO.Exception as UE #if MIN_VERSION_base(4,19,0) import Data.Functor (unzip) @@ -49,95 +49,161 @@ import Data.List.NonEmpty (unzip) #endif -newDatabase :: Dynamic -> TheRules -> IO Database -newDatabase databaseExtra databaseRules = do +newDatabase :: (String -> IO ()) -> ActionQueue -> Dynamic -> TheRules -> IO Database +newDatabase dataBaseLogger databaseActionQueue databaseExtra databaseRules = do databaseStep <- newTVarIO $ Step 0 + databaseThreads <- newTVarIO [] + databaseValuesLock <- newTVarIO True databaseValues <- atomically SMap.new + databaseRRuntimeDep <- atomically SMap.new + databaseRuntimeDepRoot <- atomically SMap.new + databaseRRuntimeDepRoot <- 
atomically SMap.new + databaseTransitiveRRuntimeDepCache <- atomically SMap.new pure Database{..} -- | Increment the step and mark dirty. -- Assumes that the database is not running a build -incDatabase :: Database -> Maybe [Key] -> IO () -- only some keys are dirty -incDatabase db (Just kk) = do - atomicallyNamed "incDatabase" $ modifyTVar' (databaseStep db) $ \(Step i) -> Step $ i + 1 - transitiveDirtyKeys <- transitiveDirtySet db kk - for_ (toListKeySet transitiveDirtyKeys) $ \k -> - -- Updating all the keys atomically is not necessary - -- since we assume that no build is mutating the db. - -- Therefore run one transaction per key to minimise contention. - atomicallyNamed "incDatabase" $ SMap.focus updateDirty k (databaseValues db) +incDatabase :: Database -> Maybe (RuntimeRestartKeys, KeySet) -> IO KeySet +incDatabase db (Just (RuntimeRestartKeys{..}, preserves)) = do + atomicallyNamed "incDatabase" $ modifyTVar' (databaseStep db) $ \(Step i) -> Step $ i + 1 + forM_ restartDirtyKeys $ \newKey -> atomically $ SMap.focus updateDirty newKey (databaseValues db) + -- Only re-enqueue actions that were not preserved across the restart. + return $ preserves -- all keys are dirty incDatabase db Nothing = do atomically $ modifyTVar' (databaseStep db) $ \(Step i) -> Step $ i + 1 let list = SMap.listT (databaseValues db) + -- all running keys are also dirty atomicallyNamed "incDatabase - all " $ flip ListT.traverse_ list $ \(k,_) -> SMap.focus updateDirty k (databaseValues db) + return $ mempty + +data RuntimeRestartKeys = RuntimeRestartKeys + { restartKillKeys :: !KeySet + -- ^ Keys used to select running runtime actions to stop before the next + -- session starts. This may include rule keys and delayed-action 'DirectKey's. + , restartDirtyKeys :: ![Key] + -- ^ Rule database keys to mark dirty before the next run. In the ghcide + -- restart path this is rule-key-only by construction; the raw hls-graph API + -- does not enforce that invariant by type. 
+ } deriving Show + +-- Note [RuntimeRestartKeys] +-- The restart plan intentionally keeps runtime cancellation separate from rule +-- dirtiness. 'restartKillKeys' is consumed by shutdown and may include direct +-- delayed-action keys. 'restartDirtyKeys' is consumed by the rule database and +-- is expected to contain only rule keys that can be marked dirty. +-- For the ghcide restart path, the initial dirty seeds come from rule keys +-- ('toKey'/'toNoFileKey'), so 'restartDirtyKeys' can use the +-- 'databaseRRuntimeDep' closure directly. Direct/root runtime edges are stored +-- separately in 'databaseRRuntimeDepRoot' by 'insertdatabaseRuntimeDep' and are +-- expanded only for 'restartKillKeys'. The raw hls-graph API does not enforce +-- this seed invariant by type. +computeToPreserve :: Database -> KeySet -> STM RuntimeRestartKeys +computeToPreserve = transitiveDirtyKeysBottomUp updateDirty :: Monad m => Focus.Focus KeyDetails m () updateDirty = Focus.adjust $ \(KeyDetails status rdeps) -> let status' - | Running _ _ _ x <- status = Dirty x + | Running _ x <- status = Dirty x | Clean x <- status = Dirty (Just x) | otherwise = status in KeyDetails status' rdeps + + +-- updateClean :: Monad m => Focus.Focus KeyDetails m () +-- updateClean = Focus.adjust $ \(KeyDetails _ rdeps) -> -- | Unwrap and build a list of keys in parallel -build - :: forall f key value . (Traversable f, RuleResult key ~ value, Typeable key, Show key, Hashable key, Eq key, Typeable value) - => Database -> Stack -> f key -> IO (f Key, f value) --- build _ st k | traceShow ("build", st, k) False = undefined -build db stack keys = do - built <- runAIO $ do - built <- builder db stack (fmap newKey keys) - case built of - Left clean -> return clean - Right dirty -> liftIO dirty - let (ids, vs) = unzip built - pure (ids, fmap (asV . resultValue) vs) - where - asV :: Value -> value - asV (Value x) = unwrapDynamic x +build :: + forall f key value. 
+ (Traversable f, RuleResult key ~ value, Typeable key, Show key, Hashable key, Eq key, Typeable value) => + Key -> Database -> Stack -> f key -> IO (f Key, f value) +build pk db stack keys = do + built <- builder pk db stack (fmap newKey keys) + let (ids, vs) = unzip built + pure (ids, fmap (asV . resultValue) vs) + where + asV :: Value -> value + asV (Value x) = unwrapDynamic x + -- | Build a list of keys and return their results. -- If none of the keys are dirty, we can return the results immediately. -- Otherwise, a blocking computation is returned *which must be evaluated asynchronously* to avoid deadlock. -builder - :: Traversable f => Database -> Stack -> f Key -> AIO (Either (f (Key, Result)) (IO (f (Key, Result)))) --- builder _ st kk | traceShow ("builder", st,kk) False = undefined -builder db@Database{..} stack keys = withRunInIO $ \(RunInIO run) -> do - -- Things that I need to force before my results are ready - toForce <- liftIO $ newTVarIO [] - current <- liftIO $ readTVarIO databaseStep - results <- liftIO $ for keys $ \id -> - -- Updating the status of all the dependencies atomically is not necessary. - -- Therefore, run one transaction per dep. 
to avoid contention - atomicallyNamed "builder" $ do - -- Spawn the id if needed - status <- SMap.lookup id databaseValues - val <- case viewDirty current $ maybe (Dirty Nothing) keyStatus status of - Clean r -> pure r - Running _ force val _ - | memberStack id stack -> throw $ StackException stack - | otherwise -> do - modifyTVar' toForce (Wait force :) - pure val - Dirty s -> do - let act = run (refresh db stack id s) - (force, val) = splitIO (join act) - SMap.focus (updateStatus $ Running current force val s) id databaseValues - modifyTVar' toForce (Spawn force:) - pure val - - pure (id, val) - - toForceList <- liftIO $ readTVarIO toForce - let waitAll = run $ waitConcurrently_ toForceList - case toForceList of - [] -> return $ Left results - _ -> return $ Right $ do - waitAll - pure results +builder :: (Traversable f) => Key -> Database -> Stack -> f Key -> IO (f (Key, Result)) +builder pk db stack keys = do + waits <- for keys (\k -> builderOne pk db stack k) + for waits (interpreBuildContinue db pk) + +-- the first run should not block +data BuildContinue + = BCContinue !(Maybe (MVar (Either SomeException (Key, Result)))) + | BCStop Key Result + +-- interpreBuildContinue :: BuildContinue -> IO (Key, Result) +interpreBuildContinue :: Database -> Key -> (Key, BuildContinue) -> IO (Key, Result) +interpreBuildContinue _db _pk (_kid, BCStop k v) = return (k, v) +interpreBuildContinue db _pk (kid, BCContinue Nothing) = builderOneFinal db emptyStack kid +interpreBuildContinue _db _pk (_kid, BCContinue (Just barrier)) = + readMVar barrier >>= either throwIO pure + + +builderOne :: Key -> Database -> Stack -> Key -> IO (Key, BuildContinue) +builderOne parentKey db stack kid = do + r <- builderOne' parentKey db stack kid + return (kid, r) + +builderOneFinal :: Database -> Stack -> Key -> IO (Key, Result) +builderOneFinal Database {..} stack key = do + -- join is used to register the async + atomicallyNamed "builder" $ do + status <- SMap.lookup key databaseValues + 
case (viewToRun $ keyStatus <$> status) of + (Dirty _prev) -> retry + (Clean r) -> return (key, r) + (Running _step _s) + | memberStack key stack -> throw $ StackException stack + | otherwise -> retry + +builderOne' :: Key -> Database -> Stack -> Key -> IO BuildContinue +builderOne' parentKey db@Database {..} stack key = UE.uninterruptibleMask $ \restore -> do + atomicallyNamed "builder" $ insertdatabaseRuntimeDep key parentKey db + barrier <- newEmptyMVar + -- join is used to register the async + join $ restore $ mask_ $ atomicallyNamed "builder" $ do + dbNotLocked db + status <- SMap.lookup key databaseValues + current <- readTVar databaseStep + + case (viewToRun $ keyStatus <$> status) of + (Dirty prev) -> do + SMap.focus (updateStatus $ Running current prev) key databaseValues + let register = spawnRefresh db stack key barrier prev refresh + -- why it is important to use rollback here + + {- Note [Rollback is required if killed before registration] + It is important to use rollback here because a key might be killed before it is registered, even though it is not one of the dirty keys. + In this case, it would skip being marked as dirty. Therefore, we have to roll back here if it is killed, to ensure consistency. + -} + (\_ -> atomicallyNamed "builderOne rollback" $ SMap.focus updateDirty key databaseValues) + restore + return $ register >> return (BCContinue (Just barrier)) + (Clean r) -> pure . pure $ BCStop key r + (Running _step _s) + | memberStack key stack -> throw $ StackException stack + | otherwise -> pure . 
pure $ BCContinue Nothing + +-- Original spawnRefresh implementation moved below to use the abstraction +-- handleResult :: (Show a1, MonadIO m) => a1 -> MVar (Either a2 (a1, b)) -> Either a2 b -> m () +handleResult :: MonadIO m => Key -> MVar (Either SomeException (Key, b)) -> Either SomeException b -> m () +handleResult k barrier eResult = do + case eResult of + Right r -> putMVar barrier (Right (k, r)) + -- accumulate the async kill info for debugging + Left e | Just (AsyncParentKill tid s ks) <- fromException e -> putMVar barrier (Left (toException $ AsyncParentKill tid s (k:ks))) + Left e -> putMVar barrier (Left e) -- | isDirty @@ -145,6 +211,7 @@ builder db@Database{..} stack keys = withRunInIO $ \(RunInIO run) -> do isDirty :: Foldable t => Result -> t (a, Result) -> Bool isDirty me = any (\(_,dep) -> resultBuilt me < resultChanged dep) + -- | Refresh dependencies for a key and compute the key: -- The refresh the deps linearly(last computed order of the deps for the key). -- If any of the deps is dirty in the process, we jump to the actual computation of the key @@ -152,44 +219,35 @@ isDirty me = any (\(_,dep) -> resultBuilt me < resultChanged dep) -- * If no dirty dependencies and we have evaluated the key previously, then we refresh it in the current thread. 
-- This assumes that the implementation will be a lookup -- * Otherwise, we spawn a new thread to refresh the dirty deps (if any) and the key itself -refreshDeps :: KeySet -> Database -> Stack -> Key -> Result -> [KeySet] -> AIO Result +refreshDeps :: KeySet -> Database -> Stack -> Key -> Result -> [KeySet] -> IO Result refreshDeps visited db stack key result = \case -- no more deps to refresh - [] -> liftIO $ compute db stack key RunDependenciesSame (Just result) + [] -> compute db stack key RunDependenciesSame (Just result) (dep:deps) -> do let newVisited = dep <> visited - res <- builder db stack (toListKeySet (dep `differenceKeySet` visited)) - case res of - Left res -> if isDirty result res + res <- builder key db stack (toListKeySet (dep `differenceKeySet` visited)) + if isDirty result res -- restart the computation if any of the deps are dirty - then liftIO $ compute db stack key RunDependenciesChanged (Just result) + then compute db stack key RunDependenciesChanged (Just result) -- else kick the rest of the deps else refreshDeps newVisited db stack key result deps - Right iores -> do - res <- liftIO iores - if isDirty result res - then liftIO $ compute db stack key RunDependenciesChanged (Just result) - else refreshDeps newVisited db stack key result deps - --- | Refresh a key: -refresh :: Database -> Stack -> Key -> Maybe Result -> AIO (IO Result) --- refresh _ st k _ | traceShow ("refresh", st, k) False = undefined + + +refresh :: Database -> Stack -> Key -> Maybe Result -> IO Result refresh db stack key result = case (addStack key stack, result) of (Left e, _) -> throw e - (Right stack, Just me@Result{resultDeps = ResultDeps deps}) -> asyncWithCleanUp $ refreshDeps mempty db stack key me (reverse deps) - (Right stack, _) -> - asyncWithCleanUp $ liftIO $ compute db stack key RunDependenciesChanged result - + (Right stack, Just me@Result{resultDeps = ResultDeps deps}) -> refreshDeps mempty db stack key me (reverse deps) + (Right stack, _) -> compute db 
stack key RunDependenciesChanged result -- | Compute a key. compute :: Database -> Stack -> Key -> RunMode -> Maybe Result -> IO Result --- compute _ st k _ _ | traceShow ("compute", st, k) False = undefined compute db@Database{..} stack key mode result = do let act = runRule databaseRules key (fmap resultData result) mode - deps <- newIORef UnknownDeps + deps <- liftIO $ newIORef UnknownDeps + curStep <- liftIO $ readTVarIO databaseStep + -- dataBaseLogger $ "Computing key: " ++ show key ++ " at step " ++ show curStep (execution, RunResult{..}) <- - duration $ runReaderT (fromAction act) $ SAction db deps stack - curStep <- readTVarIO databaseStep - deps <- readIORef deps + liftIO $ duration $ runReaderT (fromAction act) $ SAction key db deps stack + deps <- liftIO $ readIORef deps let lastChanged = maybe curStep resultChanged result let lastBuild = maybe curStep resultBuilt result -- changed time is always older than or equal to build time @@ -203,22 +261,23 @@ compute db@Database{..} stack key mode result = do let -- only update the deps when the rule ran with changes actualDeps = if runChanged /= ChangedNothing then deps else previousDeps previousDeps= maybe UnknownDeps resultDeps result - let res = Result runValue built changed curStep actualDeps execution runStore - case getResultDepsDefault mempty actualDeps of - deps | not (nullKeySet deps) - && runChanged /= ChangedNothing - -> do - -- IMPORTANT: record the reverse deps **before** marking the key Clean. - -- If an async exception strikes before the deps have been recorded, - -- we won't be able to accurately propagate dirtiness for this key - -- on the next build. 
- void $ + let res = Result { resultValue = runValue, resultBuilt = built, resultChanged = changed, resultVisited = curStep, resultDeps = actualDeps, resultExecution = execution, resultData = runStore } + liftIO $ atomicallyNamed "compute and run hook" $ do + dbNotLocked db + case getResultDepsDefault mempty actualDeps of + deps | not (nullKeySet deps) + && runChanged /= ChangedNothing + -> do + -- IMPORTANT: record the reverse deps **before** marking the key Clean. + -- If an async exception strikes before the deps have been recorded, + -- we won't be able to accurately propagate dirtiness for this key + -- on the next build. updateReverseDeps key db (getResultDepsDefault mempty previousDeps) deps - _ -> pure () - atomicallyNamed "compute and run hook" $ do + _ -> pure () runHook + -- it might be overridden by error if another kills this thread SMap.focus (updateStatus $ Clean res) key databaseValues pure res @@ -237,8 +296,8 @@ getDirtySet db = do calcAgeStatus _ = Nothing return $ mapMaybe (secondM calcAgeStatus) dbContents --- | Returns an approximation of the database keys, --- annotated with how long ago (in # builds) they were visited +-- | Returns an approximation of the database keys, annotated with how long ago +-- they were visited in build steps. 
getKeysAndVisitAge :: Database -> IO [(Key, Int)] getKeysAndVisitAge db = do values <- getDatabaseValues db @@ -247,18 +306,6 @@ getKeysAndVisitAge db = do getAge Result{resultVisited = Step s} = curr - s return keysWithVisitAge -------------------------------------------------------------------------------- --- Lazy IO trick - -data Box a = Box {fromBox :: a} - --- | Split an IO computation into an unsafe lazy value and a forcing computation -splitIO :: IO a -> (IO (), a) -splitIO act = do - let act2 = Box <$> act - let res = unsafePerformIO act2 - (void $ evaluate res, fromBox res) - --------------------------------------------------------------------------------- -- Reverse dependencies -- | Update the reverse dependencies of an Id @@ -267,7 +314,7 @@ updateReverseDeps -> Database -> KeySet -- ^ Previous direct dependencies of Id -> KeySet -- ^ Current direct dependencies of Id - -> IO () + -> STM () -- mask to ensure that all the reverse dependencies are updated updateReverseDeps myId db prev new = do forM_ (toListKeySet $ prev `differenceKeySet` new) $ \d -> @@ -280,100 +327,91 @@ updateReverseDeps myId db prev new = do -- updating all the reverse deps atomically is not needed. -- Therefore, run individual transactions for each update -- in order to avoid contention - doOne f id = atomicallyNamed "updateReverseDeps" $ - SMap.focus (alterRDeps f) id (databaseValues db) - -getReverseDependencies :: Database -> Key -> STM (Maybe KeySet) -getReverseDependencies db = (fmap.fmap) keyReverseDeps . flip SMap.lookup (databaseValues db) - -transitiveDirtySet :: Foldable t => Database -> t Key -> IO KeySet -transitiveDirtySet database = flip State.execStateT mempty . 
traverse_ loop - where - loop x = do - seen <- State.get - if x `memberKeySet` seen then pure () else do - State.put (insertKeySet x seen) - next <- lift $ atomically $ getReverseDependencies database x - traverse_ loop (maybe mempty toListKeySet next) - --------------------------------------------------------------------------------- --- Asynchronous computations with cancellation - --- | A simple monad to implement cancellation on top of 'Async', --- generalizing 'withAsync' to monadic scopes. -newtype AIO a = AIO { unAIO :: ReaderT (IORef [Async ()]) IO a } - deriving newtype (Applicative, Functor, Monad, MonadIO) - --- | Run the monadic computation, cancelling all the spawned asyncs if an exception arises -runAIO :: AIO a -> IO a -runAIO (AIO act) = do - asyncs <- newIORef [] - runReaderT act asyncs `onException` cleanupAsync asyncs - --- | Like 'async' but with built-in cancellation. --- Returns an IO action to wait on the result. -asyncWithCleanUp :: AIO a -> AIO (IO a) -asyncWithCleanUp act = do - st <- AIO ask - io <- unliftAIO act - -- mask to make sure we keep track of the spawned async - liftIO $ uninterruptibleMask $ \restore -> do - a <- async $ restore io - atomicModifyIORef'_ st (void a :) - return $ wait a - -unliftAIO :: AIO a -> AIO (IO a) -unliftAIO act = do - st <- AIO ask - return $ runReaderT (unAIO act) st - -newtype RunInIO = RunInIO (forall a. 
AIO a -> IO a) - -withRunInIO :: (RunInIO -> AIO b) -> AIO b -withRunInIO k = do - st <- AIO ask - k $ RunInIO (\aio -> runReaderT (unAIO aio) st) - -cleanupAsync :: IORef [Async a] -> IO () --- mask to make sure we interrupt all the asyncs -cleanupAsync ref = uninterruptibleMask $ \unmask -> do - asyncs <- atomicModifyIORef' ref ([],) - -- interrupt all the asyncs without waiting - mapM_ (\a -> throwTo (asyncThreadId a) AsyncCancelled) asyncs - -- Wait until all the asyncs are done - -- But if it takes more than 10 seconds, log to stderr - unless (null asyncs) $ do - let warnIfTakingTooLong = unmask $ forever $ do - sleep 10 - traceM "cleanupAsync: waiting for asyncs to finish" - withAsync warnIfTakingTooLong $ \_ -> - mapM_ waitCatch asyncs - -data Wait - = Wait {justWait :: !(IO ())} - | Spawn {justWait :: !(IO ())} - -fmapWait :: (IO () -> IO ()) -> Wait -> Wait -fmapWait f (Wait io) = Wait (f io) -fmapWait f (Spawn io) = Spawn (f io) - -waitOrSpawn :: Wait -> IO (Either (IO ()) (Async ())) -waitOrSpawn (Wait io) = pure $ Left io -waitOrSpawn (Spawn io) = Right <$> async io - -waitConcurrently_ :: [Wait] -> AIO () -waitConcurrently_ [] = pure () -waitConcurrently_ [one] = liftIO $ justWait one -waitConcurrently_ many = do - ref <- AIO ask - -- spawn the async computations. - -- mask to make sure we keep track of all the asyncs. - (asyncs, syncs) <- liftIO $ uninterruptibleMask $ \unmask -> do - waits <- liftIO $ traverse (waitOrSpawn . 
fmapWait unmask) many - let (syncs, asyncs) = partitionEithers waits - liftIO $ atomicModifyIORef'_ ref (asyncs ++) - return (asyncs, syncs) - -- work on the sync computations - liftIO $ sequence_ syncs - -- wait for the async computations before returning - liftIO $ traverse_ wait asyncs + doOne f id = SMap.focus (alterRDeps f) id (databaseValues db) + +-- compute the transitive reverse dependencies of a set of keys + +-- non-root +-- inline +{-# INLINE getRunTimeRDeps #-} +getRunTimeRDeps :: Database -> Key -> STM (Maybe KeySet) +getRunTimeRDeps db k = SMap.lookup k (databaseRRuntimeDep db) + +{-# INLINE getDeps #-} +getDeps :: SMap.Map Key KeySet -> Key -> STM (Maybe KeySet) +getDeps m k = SMap.lookup k m + +transitiveDirtyKeysBottomUp :: Database -> KeySet -> STM RuntimeRestartKeys +transitiveDirtyKeysBottomUp db@Database{..} seeds = do + TransitiveDirtyKeys dirtyKeys seen <- cacheTransitiveDirtyListBottomUpDFS db seeds + -- restartDirtyKeys should contain only rule keys. restartKillKeys also needs + -- the root/direct delayed-action keys, so expand through the root dependency + -- map only for the kill set. + TransitiveDirtyKeys _newKeys newSeen <- transitiveDirtyListBottomUpDFS databaseRRuntimeDepRoot seen + let rootKey = newKey "root" + pure RuntimeRestartKeys + { restartDirtyKeys = dirtyKeys + , restartKillKeys = deleteKeySet rootKey newSeen + } + + + +cacheTransitiveDirtyListBottomUpDFS :: Database -> KeySet -> STM TransitiveDirtyKeys +cacheTransitiveDirtyListBottomUpDFS Database{..} seeds = do + SMap.lookup seeds databaseTransitiveRRuntimeDepCache >>= \case + Just v -> return v + Nothing -> do + r <- transitiveDirtyListBottomUpDFS databaseRRuntimeDep seeds + SMap.insert r seeds databaseTransitiveRRuntimeDepCache + return r + +-- Edges in the reverse-dependency graph go from a child to its parents. +-- We perform a DFS and, after exploring all outgoing edges, cons the node onto +-- the accumulator. This yields children-before-parents order directly. 
+transitiveDirtyListBottomUpDFS :: SMap.Map Key KeySet -> KeySet -> STM TransitiveDirtyKeys +transitiveDirtyListBottomUpDFS database seeds = do + let go1 :: Key -> TransitiveDirtyKeys -> STM TransitiveDirtyKeys + go1 x acc@TransitiveDirtyKeys{transitiveDirtySet = seen} = do + if x `memberKeySet` seen + then pure acc + else do + let newAcc = acc{transitiveDirtySet = insertKeySet x seen} + mnext <- getDeps database x + childClosure <- foldrM go1 newAcc (maybe mempty toListKeySet mnext) + return childClosure{transitiveDirtyList = x : transitiveDirtyList childClosure} + -- Root keys are filtered out by 'transitiveDirtyKeysBottomUp' + -- for the dirty list, but kept in the set long enough to find + -- runtime roots that need shutdown. + -- traverse all seeds + foldrM go1 (TransitiveDirtyKeys [] mempty) (toListKeySet seeds) + +-- | Original spawnRefresh using the general pattern +-- inline +{-# INLINE spawnRefresh #-} +spawnRefresh :: + Database -> + t -> + Key -> + MVar (Either SomeException (Key, Result)) -> + Maybe Result -> + (Database -> t -> Key -> Maybe Result -> IO Result) -> + (SomeException -> IO ()) -> + (forall a. IO a -> IO a) -> + IO () +spawnRefresh db@Database {..} stack key barrier prevResult refresher rollBack restore = do + Step currentStep <- readTVarIO databaseStep + spawnAsyncWithDbRegistration + db + (DeliverStatus currentStep ("async computation; " ++ show key) key) + (refresher db stack key prevResult) + (\r -> do + case r of + Left e -> rollBack e + Right _ -> return () + handleResult key barrier r + ) restore + +-- Attempt to clear a Dirty parent that ended up with unchanged children during this event. +-- If the parent is Dirty, and every direct child is either Clean/Exception/Running for a step < eventStep, +-- and no child changed at/after eventStep, mark parent Clean (preserving its last Clean result), +-- and recursively attempt the same for its own parents. 
diff --git a/hls-graph/src/Development/IDE/Graph/Internal/Key.hs b/hls-graph/src/Development/IDE/Graph/Internal/Key.hs index 85cebeb110..9447c71a8a 100644 --- a/hls-graph/src/Development/IDE/Graph/Internal/Key.hs +++ b/hls-graph/src/Development/IDE/Graph/Internal/Key.hs @@ -6,6 +6,7 @@ module Development.IDE.Graph.Internal.Key ( Key -- Opaque - don't expose constructor, use newKey to create , KeyValue (..) , pattern Key + , pattern DirectKey , newKey , renderKey -- * KeyMap @@ -31,6 +32,9 @@ module Development.IDE.Graph.Internal.Key , fromListKeySet , deleteKeySet , differenceKeySet + , unionKeySet + , notMemberKeySet + , newDirectKey ) where --import Control.Monad.IO.Class () @@ -47,31 +51,50 @@ import Data.Text (Text) import qualified Data.Text as T import Data.Typeable import Development.IDE.Graph.Classes +import Prettyprinter import System.IO.Unsafe newtype Key = UnsafeMkKey Int + pattern Key :: () => (Typeable a, Hashable a, Show a) => a -> Key -pattern Key a <- (lookupKeyValue -> KeyValue a _) -{-# COMPLETE Key #-} +pattern Key a <- (lookupKeyValue -> (KeyValue a _)) +pattern DirectKey :: Int -> Key +pattern DirectKey a <- (lookupKeyValue -> (DirectKeyValue a)) +{-# COMPLETE Key, DirectKey #-} + +instance Pretty Key where + pretty = pretty . renderKey -data KeyValue = forall a . (Typeable a, Hashable a, Show a) => KeyValue a Text +data KeyValue = forall a . 
(Typeable a, Hashable a, Show a) => + KeyValue a Text | + DirectKeyValue Int instance Eq KeyValue where - KeyValue a _ == KeyValue b _ = Just a == cast b + KeyValue a _ == KeyValue b _ = Just a == cast b + DirectKeyValue a == DirectKeyValue b = a == b + _ == _ = False instance Hashable KeyValue where - hashWithSalt i (KeyValue x _) = hashWithSalt i (typeOf x, x) + + hashWithSalt i (KeyValue x _) = hashWithSalt i (typeOf x, x) + hashWithSalt i (DirectKeyValue x) = hashWithSalt i (typeOf x, x) instance Show KeyValue where - show (KeyValue _ t) = T.unpack t + show (KeyValue _ t) = T.unpack t + show (DirectKeyValue i) = "DirectKeyValue " ++ show i data GlobalKeyValueMap = GlobalKeyValueMap !(Map.HashMap KeyValue Key) !(IntMap KeyValue) {-# UNPACK #-} !Int keyMap :: IORef GlobalKeyValueMap keyMap = unsafePerformIO $ newIORef (GlobalKeyValueMap Map.empty IM.empty 0) - {-# NOINLINE keyMap #-} +-- | Create a new key that is guaranteed not to collide with any other key. +-- This is useful for keys that are not based on user data, e.g., for +-- tracking temporary actions. +newDirectKey :: Int -> Key +newDirectKey i = UnsafeMkKey $ negate (abs i + 1) + newKey :: (Typeable a, Hashable a, Show a) => a -> Key newKey k = unsafePerformIO $ do let !newKey = KeyValue k (T.pack (show k)) @@ -94,7 +117,9 @@ lookupKeyValue (UnsafeMkKey x) = unsafePerformIO $ do -- i.e. when it is forced for the lookup in the IntMap. k <- evaluate x GlobalKeyValueMap _ im _ <- readIORef keyMap - pure $! im IM.! k + case im IM.!? k of + Just v -> pure $! v + Nothing -> pure $! 
DirectKeyValue k {-# NOINLINE lookupKeyValue #-} @@ -103,13 +128,18 @@ instance Eq Key where instance Hashable Key where hashWithSalt i (UnsafeMkKey x) = hashWithSalt i x instance Show Key where - show (Key x) = show x + show (Key x) = show x + show (DirectKey x) = "DirectKey " ++ show x renderKey :: Key -> Text -renderKey (lookupKeyValue -> KeyValue _ t) = t +renderKey (lookupKeyValue -> (KeyValue _ t)) = t +renderKey (lookupKeyValue -> (DirectKeyValue i)) = T.pack ("DirectKeyValue " ++ show i) newtype KeySet = KeySet IntSet - deriving newtype (Eq, Ord, Semigroup, Monoid, NFData) + deriving newtype (Eq, Ord, Semigroup, Monoid, NFData, Hashable) + +instance Pretty KeySet where + pretty (KeySet is) = pretty (coerce (IS.toList is) :: [Key]) instance Show KeySet where showsPrec p (KeySet is)= showParen (p > 10) $ @@ -122,6 +152,9 @@ insertKeySet = coerce IS.insert memberKeySet :: Key -> KeySet -> Bool memberKeySet = coerce IS.member +notMemberKeySet :: Key -> KeySet -> Bool +notMemberKeySet = coerce IS.notMember + toListKeySet :: KeySet -> [Key] toListKeySet = coerce IS.toList @@ -131,6 +164,10 @@ nullKeySet = coerce IS.null differenceKeySet :: KeySet -> KeySet -> KeySet differenceKeySet = coerce IS.difference + +unionKeySet :: KeySet -> KeySet -> KeySet +unionKeySet = coerce IS.union + deleteKeySet :: Key -> KeySet -> KeySet deleteKeySet = coerce IS.delete diff --git a/hls-graph/src/Development/IDE/Graph/Internal/Rules.hs b/hls-graph/src/Development/IDE/Graph/Internal/Rules.hs index 9a5f36ca35..c8d951810d 100644 --- a/hls-graph/src/Development/IDE/Graph/Internal/Rules.hs +++ b/hls-graph/src/Development/IDE/Graph/Internal/Rules.hs @@ -42,12 +42,14 @@ addRule f = do v <- f (fromJust $ cast a :: key) b c v <- liftIO $ evaluate v pure $ Value . 
toDyn <$> v + f2 (DirectKey a) _ _ = error $ "DirectKey " ++ show a ++ " has no associated rule" runRule :: TheRules -> Key -> Maybe BS.ByteString -> RunMode -> Action (RunResult Value) runRule rules key@(Key t) bs mode = case Map.lookup (typeOf t) rules of Nothing -> liftIO $ errorIO $ "Could not find key: " ++ show key Just x -> unwrapDynamic x key bs mode +runRule _ (DirectKey a) _ _ = error $ "DirectKey " ++ show a ++ " has no associated rule" runRules :: Dynamic -> Rules () -> IO (TheRules, [Action ()]) runRules rulesExtra (Rules rules) = do diff --git a/hls-graph/src/Development/IDE/Graph/Internal/Types.hs b/hls-graph/src/Development/IDE/Graph/Internal/Types.hs index 34bed42391..43236ac1c8 100644 --- a/hls-graph/src/Development/IDE/Graph/Internal/Types.hs +++ b/hls-graph/src/Development/IDE/Graph/Internal/Types.hs @@ -1,34 +1,63 @@ {-# LANGUAGE CPP #-} {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE LambdaCase #-} {-# LANGUAGE RecordWildCards #-} module Development.IDE.Graph.Internal.Types where -import Control.Concurrent.STM (STM) -import Control.Monad ((>=>)) +import Control.Concurrent.STM (STM, TQueue, TVar, check, + flushTQueue, isEmptyTQueue, + modifyTVar', newTQueue, + newTVar, readTQueue, + readTVar, unGetTQueue, + writeTQueue) +import Control.Exception (throw) +import Control.Monad (forM, forM_, forever, + unless, when) import Control.Monad.Catch import Control.Monad.IO.Class -import Control.Monad.Trans.Reader +import Control.Monad.RWS (MonadReader (local), asks) +import Control.Monad.Trans.Reader (ReaderT (..)) import Data.Aeson (FromJSON, ToJSON) import Data.Bifunctor (second) import qualified Data.ByteString as BS import Data.Dynamic import Data.Foldable (fold) import qualified Data.HashMap.Strict as Map +import Data.HashSet (HashSet) +import qualified Data.HashSet as Set import Data.IORef -import Data.List (intercalate) -import Data.Maybe +import Data.List (intercalate, partition) +import Data.Maybe (fromMaybe, 
isJust, + isNothing) import Data.Typeable +import Debug.Trace (traceEventIO) import Development.IDE.Graph.Classes import Development.IDE.Graph.Internal.Key -import GHC.Conc (TVar, atomically) +import qualified Focus +import GHC.Conc () import GHC.Generics (Generic) import qualified ListT +import Numeric.Natural +import Prettyprinter import qualified StmContainers.Map as SMap import StmContainers.Map (Map) -import System.Time.Extra (Seconds) -import UnliftIO (MonadUnliftIO) +import System.Time.Extra (Seconds, sleep) +import UnliftIO (Async (asyncThreadId), + MonadUnliftIO, + asyncExceptionFromException, + asyncExceptionToException, + asyncWithUnmask, + atomically, cancelWith, + newEmptyTMVarIO, poll, + putTMVar, readTMVar, + readTVarIO, throwTo, + waitCatch, + withAsyncWithUnmask) +import UnliftIO.Concurrent (ThreadId, myThreadId) +import qualified UnliftIO.Exception as UE + #if !MIN_VERSION_base(4,18,0) import Control.Applicative (liftA2) @@ -68,35 +97,168 @@ data SRules = SRules { -- 'Development.IDE.Graph.Internal.Action.actionCatch'. In particular, it is -- permissible to use the 'MonadFail' instance, which will lead to an 'IOException'. 
newtype Action a = Action {fromAction :: ReaderT SAction IO a} - deriving newtype (Monad, Applicative, Functor, MonadIO, MonadFail, MonadThrow, MonadCatch, MonadMask, MonadUnliftIO) + deriving newtype (Monad, Applicative, Functor, MonadIO, MonadFail, MonadThrow, MonadCatch, MonadMask, MonadUnliftIO, MonadReader SAction) + +runActionMonad :: Action a -> SAction -> IO a +runActionMonad (Action r) s = runReaderT r s data SAction = SAction { + actionKey :: !Key, actionDatabase :: !Database, actionDeps :: !(IORef ResultDeps), actionStack :: !Stack } getDatabase :: Action Database -getDatabase = Action $ asks actionDatabase +getDatabase = asks actionDatabase + +getActionKey :: Action Key +getActionKey = asks actionKey + +setActionKey :: Key -> Action a -> Action a +setActionKey k act = local (\s' -> s'{actionKey = k}) act --- | waitForDatabaseRunningKeysAction waits for all keys in the database to finish running. -waitForDatabaseRunningKeysAction :: Action () -waitForDatabaseRunningKeysAction = getDatabase >>= liftIO . waitForDatabaseRunningKeys --------------------------------------------------------------------- -- DATABASE +-- | A simple priority used for annotating delayed actions. +-- Ordering is important: Debug < Info < Warning < Error +data Priority + = Debug + | Info + | Warning + | Error + deriving (Eq, Show, Read, Ord, Enum, Bounded) + +type DelayedActionInternal = DelayedAction () +-- | A delayed action that carries an Action payload. +data DelayedAction a = DelayedAction + { uniqueID :: Key + , actionName :: String -- ^ Name we use for debugging + , actionPriority :: Priority -- ^ Priority with which to log the action + , getAction :: Action a -- ^ The payload + } + deriving (Functor) + +actionNameKey :: DelayedAction a -> String +actionNameKey d = actionName d ++ " (" ++ show (uniqueID d) ++ ")" +instance Eq (DelayedAction a) where + a == b = uniqueID a == uniqueID b + +instance Hashable (DelayedAction a) where + hashWithSalt s = hashWithSalt s . 
uniqueID + +instance Show (DelayedAction a) where + show d = "DelayedAction: " ++ actionName d + +------------------------------------------------------------------------------- + +-- | A queue of delayed actions for the graph 'Action' monad. +data ActionQueue = ActionQueue + { newActions :: TQueue (DelayedAction ()) + , inProgress :: TVar (HashSet (DelayedAction ())) + } + +newQueue :: IO ActionQueue +newQueue = atomically $ do + newActions <- newTQueue + inProgress <- newTVar mempty + return ActionQueue {..} + +pushQueue :: DelayedAction () -> ActionQueue -> STM () +pushQueue act ActionQueue {..} = writeTQueue newActions act + +-- | Append to the front of the queue +unGetQueue :: DelayedAction () -> ActionQueue -> STM () +unGetQueue act ActionQueue {..} = unGetTQueue newActions act + +-- | You must call 'doneQueue' to signal completion +popQueue :: ActionQueue -> STM (DelayedAction ()) +popQueue ActionQueue {..} = do + x <- readTQueue newActions + modifyTVar' inProgress (Set.insert x) + return x + +popAllQueue :: ActionQueue -> STM [DelayedAction ()] +popAllQueue ActionQueue {..} = do + xs <- flushTQueue newActions + modifyTVar' inProgress (\s -> s `Set.union` Set.fromList xs) + return xs + +insertRunnning :: DelayedAction () -> ActionQueue -> STM () +insertRunnning act ActionQueue {..} = modifyTVar' inProgress (Set.insert act) + +-- | Completely remove an action from the queue +abortQueue :: DelayedAction () -> ActionQueue -> STM () +abortQueue x ActionQueue {..} = do + qq <- flushTQueue newActions + mapM_ (writeTQueue newActions) (filter (/= x) qq) + modifyTVar' inProgress (Set.delete x) + +-- | Mark an action as complete when called after 'popQueue'. 
+-- Has no effect otherwise +doneQueue :: DelayedAction () -> ActionQueue -> STM () +doneQueue x ActionQueue {..} = do + modifyTVar' inProgress (Set.delete x) + +countQueue :: ActionQueue -> STM Natural +countQueue ActionQueue{..} = do + backlog <- flushTQueue newActions + mapM_ (writeTQueue newActions) backlog + m <- Set.size <$> readTVar inProgress + return $ fromIntegral $ length backlog + m + +peekInProgress :: ActionQueue -> STM [DelayedAction ()] +peekInProgress ActionQueue {..} = Set.toList <$> readTVar inProgress + +isActionQueueEmpty :: ActionQueue -> STM Bool +isActionQueueEmpty ActionQueue {..} = do + emptyQueue <- isEmptyTQueue newActions + inProg <- Set.null <$> readTVar inProgress + return (emptyQueue && inProg) + data ShakeDatabase = ShakeDatabase !Int [Action ()] Database newtype Step = Step Int - deriving newtype (Eq,Ord,Hashable,Show) + deriving newtype (Eq,Ord,Hashable,Show,Num,Enum,Real,Integral) ---------------------------------------------------------------------- --- Keys +data DeliverStatus = DeliverStatus + { deliverStep :: Int + , deliverName :: String + , deliverKey :: Key + } deriving (Show) +instance Pretty DeliverStatus where + pretty (DeliverStatus step name key) = + pretty ("Step:" :: String) <+> pretty step <> comma + <+> pretty ("name:" :: String) <+> pretty name <> comma + <+> pretty ("key:" :: String) <+> pretty (show key) +getShakeStep :: MonadIO m => ShakeDatabase -> m Step +getShakeStep (ShakeDatabase _ _ db) = do + s <- readTVarIO $ databaseStep db + return s +lockShakeDatabaseValues :: MonadIO m => ShakeDatabase -> m () +lockShakeDatabaseValues (ShakeDatabase _ _ db) = do + liftIO $ atomically $ modifyTVar' (databaseValuesLock db) (const False) +unlockShakeDatabaseValues :: MonadIO m => ShakeDatabase -> m () +unlockShakeDatabaseValues (ShakeDatabase _ _ db) = do + liftIO $ atomically $ modifyTVar' (databaseValuesLock db) (const True) + +withShakeDatabaseValuesLock :: ShakeDatabase -> IO c -> IO c +withShakeDatabaseValuesLock 
sdb act = do + UE.bracket_ (lockShakeDatabaseValues sdb) (unlockShakeDatabaseValues sdb) act + +dbNotLocked :: Database -> STM () +dbNotLocked db = do + check =<< readTVar (databaseValuesLock db) + + +--------------------------------------------------------------------- +-- Keys newtype Value = Value Dynamic data KeyDetails = KeyDetails { @@ -109,14 +271,182 @@ onKeyReverseDeps f it@KeyDetails{..} = it{keyReverseDeps = f keyReverseDeps} data Database = Database { - databaseExtra :: Dynamic, - databaseRules :: TheRules, - databaseStep :: !(TVar Step), - databaseValues :: !(Map Key KeyDetails) + databaseExtra :: Dynamic, + + databaseThreads :: TVar [(DeliverStatus, Async ())], + + databaseRuntimeDepRoot :: SMap.Map Key KeySet, + + databaseRRuntimeDepRoot :: SMap.Map Key KeySet, + databaseRRuntimeDep :: SMap.Map Key KeySet, + -- it is used to compute the transitive reverse deps, so + -- if not in any of the transitive reverse deps of a dirty node, it is clean + -- we can skip clean the threads. + -- this is update right before we query the database for the key result. + databaseTransitiveRRuntimeDepCache :: SMap.Map KeySet TransitiveDirtyKeys, + -- ^ this is a cache for transitive reverse deps if we have computed it before + -- and the databaseRRuntimeDep did not change since last time + -- it is very useful for large projects where many files depend on a few common files + -- e.g. we do not want to recompute the transitive reverse deps every time we enter a letter + -- to a file. + + + dataBaseLogger :: String -> IO (), + + -- The action queue and + databaseActionQueue :: ActionQueue, + + + databaseRules :: TheRules, + databaseStep :: !(TVar Step), + + databaseValuesLock :: !(TVar Bool), + -- when we restart a build, we set this to False to block any other + -- threads from reading databaseValues + databaseValues :: !(Map Key KeyDetails) + } -waitForDatabaseRunningKeys :: Database -> IO () -waitForDatabaseRunningKeys = getDatabaseValues >=> mapM_ (waitRunning . 
snd) +data TransitiveDirtyKeys = TransitiveDirtyKeys + { transitiveDirtyList :: ![Key] + -- ^ Dirty keys in children-before-parents order. + , transitiveDirtySet :: !KeySet + -- ^ Same transitive closure as a set, used for membership/filtering. + } deriving Show + + +--------------------------------------------------------------------- +-- | Remove finished asyncs from 'databaseThreads' (non-blocking). +-- Uses 'poll' to check completion without waiting. +pruneFinished :: Database -> IO () +pruneFinished db@Database{..} = do + threads <- readTVarIO databaseThreads + statuses <- forM threads $ \(d,a) -> do + p <- poll a + return (d,a,p) + let still = [ (d,a) | (d,a,p) <- statuses, isNothing p ] + -- deleteDatabaseRuntimeDep of finished async keys + forM_ statuses $ \(d,_,p) -> when (isJust p) $ do + let k = deliverKey d + when (k /= newKey "root") $ atomically $ deleteDatabaseRuntimeDep k db + atomically $ modifyTVar' databaseThreads (const still) + +deleteDatabaseRuntimeDep :: Key -> Database -> STM () +deleteDatabaseRuntimeDep k db = do + result <- SMap.lookup k (databaseRuntimeDepRoot db) + case result of + Nothing -> return () + Just deps -> do + SMap.delete k (databaseRuntimeDepRoot db) + -- also remove k from all its reverse deps + forM_ (toListKeySet deps) $ \d -> do + SMap.focus (Focus.alter (fmap (deleteKeySet k))) d (databaseRRuntimeDepRoot db) + + +-- record runtime reverse deps for each key, +-- if it is root key, also reverse deps so when the root key is done, we can clean up the reverse deps. +insertdatabaseRuntimeDep :: Key -> Key -> Database -> STM () +insertdatabaseRuntimeDep k pk db = do + if isRootKey pk || isRootKey k + then do + SMap.focus (Focus.alter (Just . maybe (singletonKeySet k) (insertKeySet k))) pk (databaseRuntimeDepRoot db) + SMap.focus (Focus.alter (Just . 
maybe (singletonKeySet pk) (insertKeySet pk))) k (databaseRRuntimeDepRoot db) + else do + -- databaseRRuntimeDep only incremental, so no need to keep a reverse one + -- Also I want to know if the database changed + -- if changed we need to reset databaseTransitiveRRuntimeDepCache + SMap.lookup k (databaseRRuntimeDep db) >>= \case + Nothing -> do + SMap.insert (singletonKeySet pk) k (databaseRRuntimeDep db) + SMap.reset (databaseTransitiveRRuntimeDepCache db) + Just s -> when (pk `notMemberKeySet` s) $ do + SMap.insert (insertKeySet pk s) k (databaseRRuntimeDep db) + SMap.reset (databaseTransitiveRRuntimeDepCache db) + +-- inline +{-# INLINE isRootKey #-} +isRootKey :: Key -> Bool +isRootKey (DirectKey _a) = True +isRootKey _ = False + +--------------------------------------------------------------------- + +-- | Abstract pattern for spawning async computations with database registration. +-- This pattern is used by spawnRefresh and can be used by other functions that need: +-- 1. Protected async creation with uninterruptibleMask +-- 2. Database thread tracking and state updates +-- 3. Controlled start coordination via barriers +-- 4. Exception safety with rollback on registration failure +-- @ inline +{-# INLINE spawnAsyncWithDbRegistration #-} +spawnAsyncWithDbRegistration :: Database -> DeliverStatus -> IO a1 -> (Either SomeException a1 -> IO ()) -> (forall a. IO a -> IO a) -> IO () +spawnAsyncWithDbRegistration db@Database{..} deliver asyncBody handler restore = do + startBarrier <- newEmptyTMVarIO + -- 1. we need to make sure the thread is registered before we actually start + -- 2. we should not start in between the restart + -- 3. 
if it is killed before we start, we need to cancel the async + let register a = do + dbNotLocked db + modifyTVar' databaseThreads ((deliver, a):) + -- make sure we only start after the restart + putTMVar startBarrier () + a <- asyncWithUnmask $ \restore -> (handler =<< ((restore $ atomically (readTMVar startBarrier) >> (Right <$> asyncBody)) `catch` \e@(SomeException _) -> return (Left e))) + (restore $ atomically $ register a) + `catch` \e@(SomeException _) -> do + cancelWith a e + throw e + +-- inline +{-# INLINE runInThreadStmInNewThreads #-} +runInThreadStmInNewThreads :: Database -> DeliverStatus -> IO a -> (Either SomeException a -> IO ()) -> IO () +runInThreadStmInNewThreads db deliver act handler = uninterruptibleMask $ \restore -> + spawnAsyncWithDbRegistration db deliver act handler restore + +getDataBaseStepInt :: Database -> STM Int +getDataBaseStepInt db = do + Step s <- readTVar $ databaseStep db + return s + +data AsyncParentKill = AsyncParentKill ThreadId Step [Key] + deriving (Show, Eq) + +instance Exception AsyncParentKill where + toException = asyncExceptionToException + fromException = asyncExceptionFromException + +shutDatabase ::KeySet -> Database -> IO () +shutDatabase dirties db@Database{..} = uninterruptibleMask $ \_unmask -> do + -- wait for all threads to finish + asyncs <- readTVarIO databaseThreads + step <- readTVarIO databaseStep + tid <- myThreadId + let rootKey = newKey "root" + let (toCancel, remains) = partition (\(k, _) -> deliverKey k `memberKeySet` dirties || deliverKey k == rootKey) asyncs + atomically $ modifyTVar' databaseThreads (const remains) + mapM_ (\(k, a) -> throwTo (asyncThreadId a) $ AsyncParentKill tid step [deliverKey k, newKey "shutDatabase"]) toCancel + -- Wait until all the asyncs are done + -- But if it takes more than 10 seconds, log to stderr + unless (null asyncs) $ do + let warnIfTakingTooLong = forever $ do + sleep 5 + as <- readTVarIO databaseThreads + -- poll each async: Nothing => still running + 
statuses <- forM as $ \(d,a) -> do + p <- poll a + return (d, a, p) + let still = [ (deliverName d, show (asyncThreadId a)) | (d,a,p) <- statuses, isNothing p ] + traceEventIO $ "cleanupAsync: waiting for asyncs to finish; total=" ++ show (length as) ++ ", stillRunning=" ++ show (length still) + traceEventIO $ "cleanupAsync: still running (deliverName, threadId) = " ++ show still + withAsyncWithUnmask (\restore -> restore warnIfTakingTooLong) $ \_ -> mapM_ (waitCatch . snd) toCancel + forM_ toCancel $ \(d,_p) -> do + let k = deliverKey d + when (k /= newKey "root") $ atomically $ deleteDatabaseRuntimeDep k db + pruneFinished db + +peekAsyncsDelivers :: MonadIO m => Database -> m [DeliverStatus] +peekAsyncsDelivers db = do + asyncs <- readTVarIO (databaseThreads db) + return $ fst <$> asyncs getDatabaseValues :: Database -> IO [(Key, Status)] getDatabaseValues = atomically @@ -127,26 +457,38 @@ getDatabaseValues = atomically data Status = Clean !Result + -- dirty should say why it is dirty, + -- it should and only should be clean, + -- once all the event has been processed, + -- once event is represeted by a step | Dirty (Maybe Result) | Running { - runningStep :: !Step, - runningWait :: !(IO ()), - runningResult :: Result, -- LAZY - runningPrev :: !(Maybe Result) + runningStep :: !Step, + -- runningResult :: Result, -- LAZY + runningPrev :: !(Maybe Result) + -- runningWait :: !(MVar (Either SomeException (Key, Result))) } +instance Show Status where + show (Clean _) = "Clean" + show (Dirty _) = "Dirty" + show (Running s _ ) = "Running step " ++ show s viewDirty :: Step -> Status -> Status -viewDirty currentStep (Running s _ _ re) | currentStep /= s = Dirty re +-- viewDirty currentStep (Running s re _ _) | currentStep /= s = Dirty re viewDirty _ other = other + +viewToRun :: Maybe Status -> Status +-- viewToRun _currentStep (Dirty _) = Nothing +-- viewToRun currentStep (Running s _re _ _) | currentStep /= s = Nothing +viewToRun Nothing = (Dirty Nothing) +viewToRun 
(Just other) = other + getResult :: Status -> Maybe Result -getResult (Clean re) = Just re -getResult (Dirty m_re) = m_re -getResult (Running _ _ _ m_re) = m_re -- watch out: this returns the previous result +getResult (Clean re) = Just re +getResult (Dirty m_re) = m_re +getResult (Running _ m_re ) = m_re -- watch out: this returns the previous result -waitRunning :: Status -> IO () -waitRunning Running{..} = runningWait -waitRunning _ = return () data Result = Result { resultValue :: !Value, @@ -194,6 +536,12 @@ data RunMode | RunDependenciesChanged -- ^ At least one of my dependencies from last time have changed, or I have no recorded dependencies. deriving (Eq,Show) +instance Monoid RunMode where + mempty = RunDependenciesSame +instance Semigroup RunMode where + RunDependenciesSame <> b = b + RunDependenciesChanged <> _ = RunDependenciesChanged + instance NFData RunMode where rnf x = x `seq` () -- | How the output of a rule has changed. diff --git a/hls-graph/test/ActionSpec.hs b/hls-graph/test/ActionSpec.hs index 97ab5555ac..31f1f873a8 100644 --- a/hls-graph/test/ActionSpec.hs +++ b/hls-graph/test/ActionSpec.hs @@ -6,24 +6,52 @@ module ActionSpec where import Control.Concurrent (MVar, readMVar) import qualified Control.Concurrent as C import Control.Concurrent.STM +import Control.Exception (SomeException) +import Control.Monad (void) import Control.Monad.IO.Class (MonadIO (..)) -import Development.IDE.Graph (shakeOptions) -import Development.IDE.Graph.Database (shakeNewDatabase, +import Data.Typeable (Typeable) +import Development.IDE.Graph (RuleResult, + shakeOptions) +import Development.IDE.Graph.Classes (Hashable) +import Development.IDE.Graph.Database (RuntimeRestartKeys (..), + mkDelayedAction, + shakeComputeToPreserve, + shakeNewDatabase, shakeRunDatabase, - shakeRunDatabaseForKeys) + shakeRunDatabaseForKeys, + shakeShutDatabase) +import Development.IDE.Graph.Internal.Action (actionCatch, + actionFinally, + pumpActionThreadReRun) import 
Development.IDE.Graph.Internal.Database (build, incDatabase) import Development.IDE.Graph.Internal.Key import Development.IDE.Graph.Internal.Types import Development.IDE.Graph.Rule import Example import qualified StmContainers.Map as STM +import System.Timeout (timeout) import Test.Hspec +buildWithRoot :: forall f key value . (Traversable f, RuleResult key ~ value, Typeable key, Show key, Hashable key, Typeable value) => Database -> Stack -> f key -> IO (f Key, f value) +buildWithRoot = build (newKey ("root" :: [Char])) + +itInThread :: String -> IO () -> SpecWith () +itInThread = it + +shakeRunDatabaseFromRight :: ShakeDatabase -> [Action a] -> IO [a] +shakeRunDatabaseFromRight = shakeRunDatabase + +waitForRuntimeRootDep :: Database -> Key -> Key -> IO () +waitForRuntimeRootDep Database{..} child parent = + atomically $ do + deps <- STM.lookup child databaseRRuntimeDepRoot + check $ maybe False (memberKeySet parent) deps + spec :: Spec spec = do - describe "apply1" $ it "Test build update, Buggy dirty mechanism in hls-graph #4237" $ do + describe "apply1" $ itInThread "Test build update, Buggy dirty mechanism in hls-graph #4237" $ do let ruleStep1 :: MVar Int -> Rules () ruleStep1 m = addRule $ \CountRule _old mode -> do -- depends on ruleSubBranch, it always changed if dirty @@ -43,7 +71,7 @@ spec = do ruleSubBranch count ruleStep1 count1 -- bootstrapping the database - _ <- shakeRunDatabase db $ pure $ apply1 CountRule -- count = 1 + _ <- shakeRunDatabaseFromRight db $ pure $ apply1 CountRule -- count = 1 let child = newKey SubBranchRule let parent = newKey CountRule -- instruct to RunDependenciesChanged then CountRule should be recomputed @@ -58,43 +86,78 @@ spec = do _res3 <- shakeRunDatabaseForKeys (Just [parent]) db [apply1 CountRule] -- count = 2 c1 <- readMVar count1 c1 `shouldBe` 2 - describe "apply1" $ do - it "computes a rule with no dependencies" $ do + describe "apply1" $ do + itInThread "computes a rule with no dependencies" $ do db <- 
shakeNewDatabase shakeOptions ruleUnit - res <- shakeRunDatabase db $ + res <- shakeRunDatabaseFromRight db $ pure $ apply1 (Rule @()) res `shouldBe` [()] - it "computes a rule with one dependency" $ do + itInThread "computes a rule with one dependency" $ do db <- shakeNewDatabase shakeOptions $ do ruleUnit ruleBool - res <- shakeRunDatabase db $ pure $ apply1 Rule + res <- shakeRunDatabaseFromRight db $ pure $ apply1 Rule res `shouldBe` [True] - it "tracks direct dependencies" $ do + itInThread "tracks direct dependencies" $ do db@(ShakeDatabase _ _ theDb) <- shakeNewDatabase shakeOptions $ do ruleUnit ruleBool let theKey = Rule @Bool - res <- shakeRunDatabase db $ + res <- shakeRunDatabaseFromRight db $ pure $ apply1 theKey res `shouldBe` [True] Just (Clean res) <- lookup (newKey theKey) <$> getDatabaseValues theDb resultDeps res `shouldBe` ResultDeps [singletonKeySet $ newKey (Rule @())] - it "tracks reverse dependencies" $ do + itInThread "tracks reverse dependencies" $ do db@(ShakeDatabase _ _ Database {..}) <- shakeNewDatabase shakeOptions $ do ruleUnit ruleBool let theKey = Rule @Bool - res <- shakeRunDatabase db $ + res <- shakeRunDatabaseFromRight db $ pure $ apply1 theKey res `shouldBe` [True] Just KeyDetails {..} <- atomically $ STM.lookup (newKey (Rule @())) databaseValues keyReverseDeps `shouldBe` singletonKeySet (newKey theKey) - it "rethrows exceptions" $ do + itInThread "rethrows exceptions" $ do db <- shakeNewDatabase shakeOptions $ addRule $ \(Rule :: Rule ()) _old _mode -> error "boom" - let res = shakeRunDatabase db $ pure $ apply1 (Rule @()) + let res = shakeRunDatabaseFromRight db $ pure $ apply1 (Rule @()) res `shouldThrow` anyErrorCall - it "computes a rule with branching dependencies does not invoke phantom dependencies #3423" $ do + itInThread "restart kills a delayed action parked behind a caught producer failure" $ do + producerStarted <- C.newEmptyMVar + releaseProducer <- C.newEmptyMVar + producerCaught <- C.newEmptyMVar + 
waiterFinalized <- C.newEmptyMVar + sdb@(ShakeDatabase _ _ theDb) <- shakeNewDatabase shakeOptions $ + addRule $ \(Rule :: Rule Int) _old _mode -> do + liftIO $ void $ C.tryPutMVar producerStarted () + liftIO $ readMVar releaseProducer + error "boom" + producer <- mkDelayedAction "producer" Debug $ + actionCatch @SomeException + (void $ apply1 (Rule @Int)) + (\_ -> liftIO $ void $ C.tryPutMVar producerCaught ()) + waiter <- mkDelayedAction "waiter" Debug $ + actionFinally + (do + liftIO $ readMVar producerStarted + void $ apply1 (Rule @Int)) + (void $ C.tryPutMVar waiterFinalized ()) + + _ <- shakeRunDatabaseForKeys Nothing sdb + [ pumpActionThreadReRun sdb producer + , pumpActionThreadReRun sdb waiter + ] + let dirtyKey = newKey (Rule @Int) + waitForRuntimeRootDep theDb dirtyKey (uniqueID waiter) + C.putMVar releaseProducer () + readMVar producerCaught + C.tryReadMVar waiterFinalized >>= (`shouldBe` Nothing) + + runtimeRestartKeys <- shakeComputeToPreserve sdb (singletonKeySet dirtyKey) + uniqueID waiter `memberKeySet` restartKillKeys runtimeRestartKeys `shouldBe` True + shakeShutDatabase (restartKillKeys runtimeRestartKeys) sdb + timeout 1000000 (readMVar waiterFinalized) >>= (`shouldBe` Just ()) + itInThread "computes a rule with branching dependencies does not invoke phantom dependencies #3423" $ do cond <- C.newMVar True count <- C.newMVar 0 (ShakeDatabase _ _ theDb) <- shakeNewDatabase shakeOptions $ do @@ -105,18 +168,18 @@ spec = do -- build the one with the condition True -- This should call the SubBranchRule once -- cond rule would return different results each time - res0 <- build theDb emptyStack [BranchedRule] + res0 <- buildWithRoot theDb emptyStack [BranchedRule] snd res0 `shouldBe` [1 :: Int] - incDatabase theDb Nothing + _ <- incDatabase theDb Nothing -- build the one with the condition False -- This should not call the SubBranchRule - res1 <- build theDb emptyStack [BranchedRule] + res1 <- buildWithRoot theDb emptyStack [BranchedRule] snd res1 
`shouldBe` [2 :: Int] - -- SubBranchRule should be recomputed once before this (when the condition was True) - countRes <- build theDb emptyStack [SubBranchRule] + -- SubBranchRule should be recomputed once before this (when the condition was True) + countRes <- buildWithRoot theDb emptyStack [SubBranchRule] snd countRes `shouldBe` [1 :: Int] - describe "applyWithoutDependency" $ it "does not track dependencies" $ do + describe "applyWithoutDependency" $ itInThread "does not track dependencies" $ do db@(ShakeDatabase _ _ theDb) <- shakeNewDatabase shakeOptions $ do ruleUnit addRule $ \Rule _old _mode -> do @@ -124,7 +187,7 @@ spec = do return $ RunResult ChangedRecomputeDiff "" True $ return () let theKey = Rule @Bool - res <- shakeRunDatabase db $ + res <- shakeRunDatabaseFromRight db $ pure $ applyWithoutDependency [theKey] res `shouldBe` [[True]] Just (Clean res) <- lookup (newKey theKey) <$> getDatabaseValues theDb diff --git a/hls-graph/test/DatabaseSpec.hs b/hls-graph/test/DatabaseSpec.hs index 9061bfa89d..643356c429 100644 --- a/hls-graph/test/DatabaseSpec.hs +++ b/hls-graph/test/DatabaseSpec.hs @@ -2,6 +2,7 @@ module DatabaseSpec where +import ActionSpec (itInThread) import Development.IDE.Graph (newKey, shakeOptions) import Development.IDE.Graph.Database (shakeNewDatabase, shakeRunDatabase) @@ -13,11 +14,10 @@ import Example import System.Time.Extra (timeout) import Test.Hspec - spec :: Spec spec = do describe "Evaluation" $ do - it "detects cycles" $ do + itInThread "detects cycles" $ do db <- shakeNewDatabase shakeOptions $ do ruleBool addRule $ \Rule _old _mode -> do @@ -27,17 +27,16 @@ spec = do timeout 1 res `shouldThrow` \StackException{} -> True describe "compute" $ do - it "build step and changed step updated correctly" $ do + itInThread "build step and changed step updated correctly" $ do (ShakeDatabase _ _ theDb) <- shakeNewDatabase shakeOptions $ do ruleStep - let k = newKey $ Rule @() -- ChangedRecomputeSame r1@Result{resultChanged=rc1, 
resultBuilt=rb1} <- compute theDb emptyStack k RunDependenciesChanged Nothing - incDatabase theDb Nothing + _ <- incDatabase theDb Nothing -- ChangedRecomputeSame r2@Result{resultChanged=rc2, resultBuilt=rb2} <- compute theDb emptyStack k RunDependenciesChanged (Just r1) - incDatabase theDb Nothing + _ <- incDatabase theDb Nothing -- changed Nothing Result{resultChanged=rc3, resultBuilt=rb3} <- compute theDb emptyStack k RunDependenciesSame (Just r2) rc1 `shouldBe` Step 0