diff --git a/doc/ref/grains/all/index.rst b/doc/ref/grains/all/index.rst index b68833476d2a..1a78fc952964 100644 --- a/doc/ref/grains/all/index.rst +++ b/doc/ref/grains/all/index.rst @@ -19,4 +19,5 @@ grains modules opts package pending_reboot + resources rest_sample diff --git a/doc/ref/grains/all/salt.grains.resources.rst b/doc/ref/grains/all/salt.grains.resources.rst new file mode 100644 index 000000000000..5bc9bfb76fbf --- /dev/null +++ b/doc/ref/grains/all/salt.grains.resources.rst @@ -0,0 +1,5 @@ +salt.grains.resources +===================== + +.. automodule:: salt.grains.resources + :members: diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst index de751f76a1f3..1464ea551440 100644 --- a/doc/ref/modules/all/index.rst +++ b/doc/ref/modules/all/index.rst @@ -67,6 +67,7 @@ execution modules dpkg_lowpkg dummyproxy_pkg dummyproxy_service + dummyresource_test environ etcd_mod ethtool @@ -215,6 +216,10 @@ execution modules ssh_pkg ssh_pki ssh_service + sshresource_cmd + sshresource_pkg + sshresource_state + sshresource_test state status supervisord diff --git a/doc/ref/modules/all/salt.modules.dummyresource_test.rst b/doc/ref/modules/all/salt.modules.dummyresource_test.rst new file mode 100644 index 000000000000..a78e2d44a089 --- /dev/null +++ b/doc/ref/modules/all/salt.modules.dummyresource_test.rst @@ -0,0 +1,6 @@ +salt.modules.dummyresource_test +=============================== + +.. automodule:: salt.modules.dummyresource_test + :members: + :undoc-members: diff --git a/doc/ref/modules/all/salt.modules.sshresource_cmd.rst b/doc/ref/modules/all/salt.modules.sshresource_cmd.rst new file mode 100644 index 000000000000..c8f4af6bb243 --- /dev/null +++ b/doc/ref/modules/all/salt.modules.sshresource_cmd.rst @@ -0,0 +1,6 @@ +salt.modules.sshresource_cmd +============================ + +.. 
automodule:: salt.modules.sshresource_cmd + :members: + :undoc-members: diff --git a/doc/ref/modules/all/salt.modules.sshresource_pkg.rst b/doc/ref/modules/all/salt.modules.sshresource_pkg.rst new file mode 100644 index 000000000000..6d9ce028a6be --- /dev/null +++ b/doc/ref/modules/all/salt.modules.sshresource_pkg.rst @@ -0,0 +1,6 @@ +salt.modules.sshresource_pkg +============================ + +.. automodule:: salt.modules.sshresource_pkg + :members: + :undoc-members: diff --git a/doc/ref/modules/all/salt.modules.sshresource_state.rst b/doc/ref/modules/all/salt.modules.sshresource_state.rst new file mode 100644 index 000000000000..e9faad2df2fd --- /dev/null +++ b/doc/ref/modules/all/salt.modules.sshresource_state.rst @@ -0,0 +1,6 @@ +salt.modules.sshresource_state +============================== + +.. automodule:: salt.modules.sshresource_state + :members: + :undoc-members: diff --git a/doc/ref/modules/all/salt.modules.sshresource_test.rst b/doc/ref/modules/all/salt.modules.sshresource_test.rst new file mode 100644 index 000000000000..af51226c5a0f --- /dev/null +++ b/doc/ref/modules/all/salt.modules.sshresource_test.rst @@ -0,0 +1,6 @@ +salt.modules.sshresource_test +============================= + +.. automodule:: salt.modules.sshresource_test + :members: + :undoc-members: diff --git a/salt/client/__init__.py b/salt/client/__init__.py index 2732dc20a54a..63435755a0da 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -1259,8 +1259,19 @@ def get_iter_returns( # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet - jinfo = self.gather_job_info( - jid, list(minions - found), "list", **kwargs + # Only send gather_job_info to IDs that are accepted minions. + # Resource IDs (e.g. "dummy-01") are not PKI keys; sending + # saltutil.find_job to them as a list target would fail and + # print a misleading "No minions matched" message. 
+ pending = minions - found + accepted_minions = set( + salt.utils.minions.CkMinions(self.opts)._pki_minions() + ) + minion_pending = list(pending & accepted_minions) + jinfo = ( + self.gather_job_info(jid, minion_pending, "list", **kwargs) + if minion_pending + else {} ) minions_running = False # if we weren't assigned any jid that means the master thinks diff --git a/salt/client/netapi.py b/salt/client/netapi.py index 27029af85a3e..2aefeb126f71 100644 --- a/salt/client/netapi.py +++ b/salt/client/netapi.py @@ -2,6 +2,7 @@ The main entry point for salt-api """ +import asyncio import logging import signal @@ -63,7 +64,7 @@ def run(self): # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) - self.process_manager.run() + asyncio.run(self.process_manager.run()) def _handle_signals(self, signum, sigframe): # escalate the signals to the process manager diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py index 634e924f805e..c99bde9aaa1f 100644 --- a/salt/client/ssh/__init__.py +++ b/salt/client/ssh/__init__.py @@ -229,6 +229,18 @@ tar --strip-components=1 -xf "$RELENV_TAR" -C "{THIN_DIR}" fi +# BUG-WORKAROUND: salt-ssh relenv path never writes the minion config that +# Single.__init__ builds in self.minion_config. The non-relenv (salt-thin) +# path embeds it in SSH_PY_SHIM via OPTIONS.config, which the Python shim +# writes to thin_dir/minion. The relenv shim has no equivalent, so salt-call +# falls back to system defaults (/var/cache/salt, /var/log/salt) and fails for +# any unprivileged user. Writing it here replicates the salt-thin behaviour. +# See: https://github.com/saltstack/salt (file as issue against salt-ssh relenv) +mkdir -p "{THIN_DIR}/running_data/pki" +cat > "{THIN_DIR}/minion" << 'SALT_MINION_CONF_EOF' +__SALT_MINION_CONFIG__ +SALT_MINION_CONF_EOF + # Check if Python binary is executable if [ ! 
-x "$SALT_CALL_BIN" ]; then echo "ERROR: salt-call binary not found or not executable at $SALT_CALL_BIN" >&2 @@ -265,9 +277,6 @@ echo "{RSTR}" echo "{RSTR}" >&2 -# Debug: Show the actual command being executed -echo "SALT_CALL_CMD: $SALT_CALL_BIN --retcode-passthrough --local --metadata --out=json -lquiet -c {THIN_DIR} -- {ARGS}" >&2 - exec $SUDO "$SALT_CALL_BIN" --retcode-passthrough --local --metadata --out=json -lquiet -c "{THIN_DIR}" -- {ARGS} EOF """.split( @@ -1191,40 +1200,26 @@ def __init__( self.arch = arch.strip() if self.opts.get("relenv"): - # Check if OS/arch already detected and cached in opts - if "relenv_kernel" in opts and "relenv_os_arch" in opts: - kernel = opts["relenv_kernel"] - os_arch = opts["relenv_os_arch"] - log.warning(f"RELENV: Reusing cached OS/arch: {kernel}/{os_arch}") + if thin: + # Caller pre-resolved the relenv tarball path — skip the SSH + # round-trip that detect_os_arch() would otherwise make during + # __init__. This is important when Single is created inside a + # minion job worker where every extra SSH connection adds latency + # and can cause hangs. + self.thin = thin else: - # First Single instance - detect and cache OS/arch in opts before assigning to self.opts kernel, os_arch = self.detect_os_arch() - opts["relenv_kernel"] = kernel - opts["relenv_os_arch"] = os_arch - log.warning(f"RELENV: Detected and cached OS/arch: {kernel}/{os_arch}") - - log.info( - "RELENV: About to call gen_relenv() to download/generate tarball..." 
- ) - self.thin = salt.utils.relenv.gen_relenv( - self.opts["cachedir"], kernel=kernel, os_arch=os_arch - ) - log.info( - "RELENV: gen_relenv() completed successfully, tarball path: %s", - self.thin, - ) + self.thin = salt.utils.relenv.gen_relenv( + opts["cachedir"], kernel=kernel, os_arch=os_arch + ) # Add file_roots and related config to minion config # (required for slsutil functions and other fileserver operations) - # Thin does this in _run_wfunc_thin() at lines 1498-1507 - # NOTE: Now that we transfer config via SCP instead of embedding in command line, - # we CAN add __master_opts__ without hitting ARG_MAX limits self.minion_opts["file_roots"] = self.opts["file_roots"] self.minion_opts["pillar_roots"] = self.opts["pillar_roots"] - self.minion_opts["ext_pillar"] = self.opts["ext_pillar"] - # For relenv, we need to override extension_modules to point to where the shim - # extracts the tarball on the remote system. The wrapper system will copy this - # to opts_pkg["extension_modules"] which is used by salt-call. + self.minion_opts["ext_pillar"] = self.opts.get("ext_pillar", []) + # For relenv, override extension_modules to point to where the shim + # extracts the tarball on the remote system. self.minion_opts["extension_modules"] = ( f"{self.thin_dir}/running_data/var/cache/salt/minion/extmods" ) @@ -1232,18 +1227,7 @@ def __init__( self.minion_opts["__master_opts__"] = self.context["master_opts"] # Re-serialize the minion config after updating relenv-specific paths - # This ensures the config file sent to the remote system has the correct extension_modules path self.minion_config = salt.serializers.yaml.serialize(self.minion_opts) - log.debug( - "RELENV: Re-serialized minion config with extension_modules=%s", - self.minion_opts["extension_modules"], - ) - - # NOTE: We no longer pre-compile pillar for relenv here. 
- # Both thin and relenv now use the wrapper system (_run_wfunc_thin()) - # which compiles pillar dynamically, ensuring correct behavior with pillar overrides: - # - 1x compilation without pillar overrides - # - 2x compilation with pillar overrides (re-compiled in wrapper modules) else: self.thin = thin if thin else salt.utils.thin.thin_path(opts["cachedir"]) @@ -1867,22 +1851,16 @@ def _cmd_str(self): and isinstance(self.argv[0], str) and " " in self.argv[0] ): - # Split the string into shell words argv_to_use = shlex.split(self.argv[0]) else: argv_to_use = self.argv quoted_args = " ".join(shlex.quote(str(arg)) for arg in argv_to_use) - log.debug( - "RELENV: Building shim with argv=%s, argv_to_use=%s, quoted_args=%s", - self.argv, - argv_to_use, - quoted_args, - ) # Note: Config is sent separately via SCP in cmd_block() to avoid ARG_MAX issues - # The shim expects the config file to already exist at {THIN_DIR}/minion - return SSH_SH_SHIM_RELENV.format( + # Use .replace() for minion_config — it is YAML flow-style and + # may contain literal { } which would break .format(). + shim = SSH_SH_SHIM_RELENV.format( DEBUG=debug, SUDO=sudo, SUDO_USER=sudo_user or "", @@ -1892,6 +1870,7 @@ def _cmd_str(self): ARGS=quoted_args, EXT_MODS_VERSION=self.mods.get("version", ""), ) + return shim.replace("__SALT_MINION_CONFIG__", self.minion_config) thin_code_digest, thin_sum = salt.utils.thin.thin_sum(cachedir, "sha1") arg_str = ''' diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 9a15be4fa0eb..f26e8de23d66 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -471,6 +471,8 @@ def _gather_buffer_space(): "return_retry_tries": int, # Configures amount of retries for Syndic to Master of Masters "syndic_retries": int, + # Top-level pillar key for per-type resource configuration (default: resources) + "resource_pillar_key": str, # Specify one or more returners in which all events will be sent to. 
Requires that the returners # in question have an event_return(event) function! "event_return": (list, str), @@ -1285,6 +1287,7 @@ def _gather_buffer_space(): "return_retry_timer": 5, "return_retry_timer_max": 10, "return_retry_tries": 3, + "resource_pillar_key": "resources", "syndic_retries": 3, "random_reauth_delay": 10, "winrepo_source_dir": "salt://win/repo-ng/", diff --git a/salt/grains/resources.py b/salt/grains/resources.py new file mode 100644 index 000000000000..468498bd11a0 --- /dev/null +++ b/salt/grains/resources.py @@ -0,0 +1,28 @@ +""" +Expose the resource IDs managed by this minion as a grain. + +The grain ``salt_resources`` mirrors the ``resources:`` section of the minion +configuration so that the master's grains cache records which resources each +minion manages. This enables grain-based targeting (``G@salt_resources``) and +gives operators a human-readable view of resource topology via ``grains.items``. + +Example output:: + + salt_resources: + dummy: + - dummy-01 + - dummy-02 + - dummy-03 +""" + +import logging + +log = logging.getLogger(__name__) + + +def resources(): + """Return the resource IDs managed by this minion, keyed by resource type.""" + managed = __opts__.get("resources", {}) + if not managed: + return {} + return {"salt_resources": managed} diff --git a/salt/loader/__init__.py b/salt/loader/__init__.py index 8faa48e59d16..f4da0d365112 100644 --- a/salt/loader/__init__.py +++ b/salt/loader/__init__.py @@ -63,6 +63,7 @@ str(SALT_BASE_PATH / "output"), str(SALT_BASE_PATH / "pillar"), str(SALT_BASE_PATH / "proxy"), + str(SALT_BASE_PATH / "resource"), str(SALT_BASE_PATH / "queues"), str(SALT_BASE_PATH / "renderers"), str(SALT_BASE_PATH / "returners"), @@ -495,6 +496,92 @@ def proxy( ) +def resource( + opts, + functions=None, + utils=None, + context=None, + loaded_base_name=None, +): + """ + Load the resource connection modules (``salt/resource/*.py``). 
+ + Returns a LazyLoader whose functions are accessible via the + ``__resource_funcs__`` dunder injected into resource execution modules. + Analogous to :func:`proxy` for proxy minions. + + :param dict opts: The Salt options dictionary. + :param LazyLoader functions: A LazyLoader returned from :func:`minion_mods`. + :param LazyLoader utils: A LazyLoader returned from :func:`utils`. + :param dict context: Shared loader context dictionary. + :param str loaded_base_name: Module namespace prefix for this loader. + """ + return LazyLoader( + _module_dirs(opts, "resource"), + opts, + tag="resource", + pack={ + "__salt__": functions, + "__utils__": utils, + "__context__": context, + "__resource__": {}, + }, + extra_module_dirs=utils.module_dirs if utils else None, + pack_self="__resource_funcs__", + loaded_base_name=loaded_base_name, + ) + + +def resource_modules( + opts, + resource_type, + resource_funcs=None, + utils=None, + context=None, + loaded_base_name=None, +): + """ + Load execution modules for a specific resource type. + + Creates an isolated :class:`LazyLoader` whose opts contain + ``resource_type``, allowing execution modules to gate their + ``__virtual__`` on that value — the same mechanism proxy modules use + with ``proxytype``. A minion managing N resource types holds N of + these loaders simultaneously (one per type, not one per device). + + :param dict opts: The Salt options dictionary. A copy is made and + ``resource_type`` is injected before passing to the loader. + :param str resource_type: The resource type string (e.g. ``"dummy"``). + :param LazyLoader resource_funcs: The resource connection loader returned + by :func:`resource`, injected as ``__resource_funcs__``. + :param LazyLoader utils: A LazyLoader returned from :func:`utils`. + :param dict context: Shared loader context dictionary. + :param str loaded_base_name: Module namespace prefix for this loader. 
+ """ + resource_opts = dict(opts) + resource_opts["resource_type"] = resource_type + + return LazyLoader( + _module_dirs(resource_opts, "modules", "module"), + resource_opts, + tag="module", + pack={ + "__context__": context, + "__utils__": utils, + "__resource_funcs__": resource_funcs, + "__opts__": resource_opts, + # Empty sentinel so LazyLoader creates a NamedLoaderContext for + # __resource__ on every loaded module. The NamedLoaderContext + # reads from resource_ctxvar, which _thread_return sets per-call + # before dispatching — giving each resource job its own identity. + "__resource__": {}, + }, + extra_module_dirs=utils.module_dirs if utils else None, + loaded_base_name=loaded_base_name, + pack_self="__salt__", + ) + + def returners( opts, functions, whitelist=None, context=None, proxy=None, loaded_base_name=None ): diff --git a/salt/loader/context.py b/salt/loader/context.py index 40b608de6d4c..0859df9132be 100644 --- a/salt/loader/context.py +++ b/salt/loader/context.py @@ -19,6 +19,14 @@ loader_ctxvar = contextvars.ContextVar(DEFAULT_CTX_VAR) +# Per-call resource context. Set via resource_ctxvar.set() in +# _thread_return before executing the job. contextvars are per-thread: each +# new thread inherits a copy of the parent's context, and set() only mutates +# the current thread's copy. LazyLoader.run() calls copy_context() fresh on +# every invocation, so the snapshot it passes to _last_context.run() already +# contains the value we set here — completely isolated from other threads. +resource_ctxvar = contextvars.ContextVar("__resource__", default={}) + @contextlib.contextmanager def loader_context(loader): @@ -68,6 +76,13 @@ def value(self): """ The value of the current for this context """ + # __resource__ is served from resource_ctxvar, which is set + # per-thread in _thread_return before the job function executes. 
+ # LazyLoader.run() snapshots the thread context via copy_context() + # on every call, so each _run_as invocation sees the value that was + # current when the function was invoked — no pack mutation needed. + if self.name == "__resource__": + return resource_ctxvar.get() loader = self.loader() if loader is None: return self.default diff --git a/salt/master.py b/salt/master.py index 73e596050004..b4272c6ddf63 100644 --- a/salt/master.py +++ b/salt/master.py @@ -1085,7 +1085,7 @@ def __bind(self): args=(self.opts, self.master_key, self.key, req_channels), name=name, ) - self.process_manager.run() + asyncio.run(self.process_manager.run()) def run(self): """ @@ -1364,6 +1364,7 @@ class AESFuncs(TransportMethods): "_mine", "_mine_delete", "_mine_flush", + "_register_resources", "_file_recv", "_pillar", "_minion_event", @@ -1656,6 +1657,31 @@ def _mine_flush(self, load): else: return self.masterapi._mine_flush(load, skip_verify=True) + def _register_resources(self, load): + """ + Update the flat resource index for a minion and persist it to the + cache. Called by the minion on startup via ``cmd: "_register_resources"`` + so that the master knows which resource IDs each minion manages. + + Uses :func:`salt.utils.minions._update_resource_index` which atomically + updates both the in-process index (so this worker sees the change + immediately) and the single flat ``resource_index`` cache file (so + other workers pick it up within their TTL window). 
+ """ + load = self.__verify_load(load, ("id", "resources")) + if load is False: + return {} + if self.opts.get("minion_data_cache", True): + salt.utils.minions._update_resource_index( + self.masterapi.cache, load["id"], load["resources"] + ) + log.debug( + "Registered resources for minion '%s': %s", + load["id"], + list(load["resources"].keys()), + ) + return True + def _file_recv(self, load): """ Allows minions to send files to the master, files are sent to the @@ -1857,6 +1883,13 @@ def _return(self, load): ) load["sig"] = sig + # Transport security uses load["id"] (the minion's authenticated ID) for + # the channel check above. For resource returns the minion embeds the + # resource ID separately so we can remap here, after authentication, so + # the event and job cache are keyed by the resource ID instead. + if "resource_id" in load: + load["id"] = load.pop("resource_id") + try: salt.utils.job.store_job( self.opts, load, event=self.event, mminion=self.mminion @@ -2339,7 +2372,10 @@ async def publish(self, clear_load): delimiter = extra.get("delimiter", DEFAULT_TARGET_DELIM) _res = self.ckminions.check_minions( - clear_load["tgt"], clear_load.get("tgt_type", "glob"), delimiter + clear_load["tgt"], + clear_load.get("tgt_type", "glob"), + delimiter, + fun=clear_load.get("fun"), ) minions = _res.get("minions", list()) missing = _res.get("missing", list()) diff --git a/salt/matchers/compound_match.py b/salt/matchers/compound_match.py index 04da7281e3ee..5438a4470f3c 100644 --- a/salt/matchers/compound_match.py +++ b/salt/matchers/compound_match.py @@ -50,6 +50,8 @@ def match(tgt, opts=None, minion_id=None): "N": None, # Nodegroups should already be expanded "S": "ipcidr", "E": "pcre", + "T": "resource", + "M": "managing_minion", } if HAS_RANGE: ref["R"] = "range" diff --git a/salt/matchers/managing_minion_match.py b/salt/matchers/managing_minion_match.py new file mode 100644 index 000000000000..f18f2c03b906 --- /dev/null +++ b/salt/matchers/managing_minion_match.py 
@@ -0,0 +1,41 @@ +""" +Minion-side matcher for the ``M@`` managing-minion targeting engine. + +A ``M@`` expression targets a minion directly by its ID, as the entity +*responsible for* a set of resources — rather than targeting the resources +themselves. It is most useful in compound expressions where you want to +constrain a resource target to those owned by a specific minion: + +.. code-block:: text + + salt -C 'M@vcenter-1 and T@vcf_host' + +That expression matches all ``vcf_host`` resources managed by the minion +whose ID is ``vcenter-1``. On its own ``M@vcenter-1`` is equivalent to +``L@vcenter-1``, but pairing it with ``T@`` is its primary use-case. +""" + +import logging + +log = logging.getLogger(__name__) + + +def match(tgt, opts=None, minion_id=None): + """ + Return ``True`` if this minion's ID equals ``tgt``. + + ``tgt`` is the minion ID given after the ``M@`` prefix. The match is + always an exact equality check — no globbing or regex. + + :param str tgt: The minion ID to match against. + :param dict opts: Salt opts dict; defaults to ``__opts__``. + :param str minion_id: The minion ID to evaluate; defaults to ``opts["id"]``. + :rtype: bool + """ + if opts is None: + opts = __opts__ # pylint: disable=undefined-variable + if minion_id is None: + minion_id = opts.get("id", "") + result = minion_id == tgt + log.debug("managing_minion_match: M@%s => %s (id=%s)", tgt, result, minion_id) + return result diff --git a/salt/matchers/resource_match.py b/salt/matchers/resource_match.py new file mode 100644 index 000000000000..cb6ddc7bb378 --- /dev/null +++ b/salt/matchers/resource_match.py @@ -0,0 +1,56 @@ +""" +Minion-side matcher for the ``T@`` resource targeting engine. + +A ``T@`` expression targets Salt Resources managed by this minion. The +pattern is either a bare resource type or a full Salt Resource Name (SRN): + +.. 
code-block:: text + + T@vcf_host # any resource of this type + T@vcf_host:esxi-01 # one specific resource by SRN + +This matcher is evaluated on the minion. It reads from ``opts["resources"]``, +which is populated when the minion loads its resource modules — analogous to +how ``grain_match`` reads from ``opts["grains"]``. No cache or registry +lookup is performed. +""" + +import logging + +log = logging.getLogger(__name__) + + +def match(tgt, opts=None, minion_id=None): + """ + Return ``True`` if this minion manages at least one resource that matches + the ``T@`` pattern ``tgt``. + + ``tgt`` is the portion of the ``T@`` expression after the ``@``. It is + either a bare resource type (``vcf_host``) or a full SRN + (``vcf_host:esxi-01``). When a bare type is given, every resource of that + type in ``opts["resources"]`` satisfies the match. When a full SRN is + given, only an exact match against a resource ID in ``opts["resources"]`` + satisfies it. + + The structure of ``opts["resources"]`` is populated by the resource module + loader at minion startup, analogous to ``opts["grains"]``. + + :param str tgt: The T@ pattern — a resource type or a full SRN. + :param dict opts: Salt opts dict; defaults to ``__opts__``. + :param str minion_id: The minion ID to evaluate; defaults to ``opts["id"]``. 
+ :rtype: bool + """ + if opts is None: + opts = __opts__ # pylint: disable=undefined-variable + resources = opts.get("resources", {}) + if not resources: + return False + + if ":" in tgt: + resource_type, resource_id = tgt.split(":", 1) + result = resource_id in resources.get(resource_type, []) + else: + result = bool(resources.get(tgt)) + + log.debug("resource_match: T@%s => %s (resources=%s)", tgt, result, list(resources)) + return result diff --git a/salt/minion.py b/salt/minion.py index 45e326949398..621e6b6d6888 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -59,6 +59,7 @@ import salt.utils.network import salt.utils.platform import salt.utils.process +import salt.utils.resources import salt.utils.schedule import salt.utils.ssdp import salt.utils.state @@ -463,6 +464,11 @@ def gen_modules(self, initial_load=False, context=None): pillarenv=self.opts.get("pillarenv"), ).compile_pillar() + # Populate opts["resources"] from pillar now that pillar is available. + # Must happen before the resource loader loop below so that per-type + # execution module loaders are created for the correct set of types. + self.opts["resources"] = self._discover_resources() + self.utils = salt.loader.utils(self.opts, context=context) self.functions = salt.loader.minion_mods( self.opts, utils=self.utils, context=context @@ -474,6 +480,59 @@ def gen_modules(self, initial_load=False, context=None): self.proxy = salt.loader.proxy( self.opts, functions=self.functions, returners=self.returners ) + # Load resource connection modules (salt/resource/*.py) and build + # one execution-module loader per managed resource type. + self.resource_funcs = salt.loader.resource( + self.opts, + functions=self.functions, + utils=self.utils, + context=context, + ) + self.resource_funcs.pack["__salt__"] = self.functions + # Build resource_loaders into a local dict before assigning to + # self.resource_loaders. 
Without this, the previous pattern: + # + # self.resource_loaders = {} ← exposes empty dict + # for ...: self.resource_loaders[t] = … + # + # creates a window where a concurrent thread (multiprocessing: False) + # calling gen_modules() can read resource_loaders.get(type) == None + # and fail with "No resource loader available". A single dict + # assignment is atomic in CPython, so the old loaders remain visible + # until the new complete set is ready. + _new_resource_loaders = {} + for resource_type in self.opts.get("resources", {}): + rtype_base = ( + f"{self.opts.get('loaded_base_name', 'salt.loaded.int')}" + f".resource.{resource_type}" + ) + _new_resource_loaders[resource_type] = salt.loader.resource_modules( + self.opts, + resource_type, + resource_funcs=self.resource_funcs, + utils=self.utils, + context=context, + loaded_base_name=rtype_base, + ) + self.resource_loaders = _new_resource_loaders + + # Call init() on each resource type so that __context__ is populated + # before any per-resource operations (grains, ping, etc.) are dispatched. + # Mirrors how proxy.init() is called during proxy-minion startup. + for resource_type in self.opts.get("resources", {}): + init_fn = f"{resource_type}.init" + if init_fn in self.resource_funcs: + try: + self.resource_funcs[init_fn](self.opts) + log.debug("Initialized resource type '%s'", resource_type) + except Exception as exc: # pylint: disable=broad-except + log.error( + "Failed to initialize resource type '%s': %s", + resource_type, + exc, + exc_info=True, + ) + # TODO: remove self.function_errors = {} # Keep the funcs clean self.states = salt.loader.states( @@ -493,6 +552,63 @@ def gen_modules(self, initial_load=False, context=None): self.opts, functions=self.functions, proxy=self.proxy, context=context ) + def _discover_resources(self): + """ + Build ``opts["resources"]`` by calling each resource type's + ``discover(opts)`` function. 
+ + Resource types are read from the pillar subtree at + ``opts["pillar"][opts["resource_pillar_key"]]`` (default key + ``"resources"``, configurable via minion option ``resource_pillar_key``). + A temporary resource loader is used to call each type's + ``discover(opts)``; the return value is a dict of + ``{resource_type: [resource_id, ...]}``. + + If the merged pillar contains no key by that name, that is treated the + same as an empty mapping: no pillar-declared resource types, so + discovery returns an empty dict (no stale IDs left in + ``opts["resources"]``). + + If the pillar *does* contain that key (even if its value is empty / + all entries removed), that is an authoritative declaration and the + result reflects only what the pillar says (via ``discover()`` per + type). + + Called from :meth:`gen_modules` after pillar is compiled and before + the per-type execution-module loaders are created. + """ + pillar_resources = salt.utils.resources.pillar_resources_tree(self.opts) + + # A minimal resource loader is sufficient here — discover() only reads + # from the opts dict passed to it and does not need other dunders. 
+ discovery_loader = salt.loader.resource(self.opts) + discovered = {} + for resource_type in pillar_resources: + discover_fn = f"{resource_type}.discover" + if discover_fn not in discovery_loader: + log.warning( + "No resource module found for type '%s'; skipping discovery.", + resource_type, + ) + continue + try: + ids = discovery_loader[discover_fn](self.opts) + if ids: + discovered[resource_type] = list(ids) + log.debug( + "Discovered %d resource(s) of type '%s': %s", + len(ids), + resource_type, + ids, + ) + except Exception as exc: # pylint: disable=broad-except + log.warning( + "Resource discovery failed for type '%s': %s", + resource_type, + exc, + ) + return discovered + @staticmethod def process_schedule(minion, loop_interval): try: @@ -1928,7 +2044,13 @@ async def _handle_decoded_payload_impl(self, data): # Check bypass flag early to prevent deduplication of queued jobs bypass_check = data.get("__ignore_process_count_max", False) if self.jid_queue is not None: - if data["jid"] in self.jid_queue: + if data.get("resource_job"): + # Resource jobs intentionally share the parent job's JID so + # that returns are filed under the same job ID. Skip the + # deduplication gate entirely — each resource is a distinct + # execution even though the JID is the same. + pass + elif data["jid"] in self.jid_queue: if not bypass_check: return else: @@ -2452,12 +2574,18 @@ def _target(cls, minion_instance, opts, data, connected, creds_map): return Minion._thread_return(minion_instance, opts, data) def _execute_job_function( - self, function_name, function_args, executors, opts, data + self, function_name, function_args, executors, opts, data, functions=None ): """ Executes a function within a job given it's name, the args and the executors. It also checks if the function is allowed to run if 'blackout mode' is enabled. + + ``functions`` defaults to ``self.functions`` but callers may pass a + different loader (e.g. 
a per-resource-type loader) to route execution + to the correct module set. """ + if functions is None: + functions = self.functions minion_blackout_violation = False if self.connected and self.opts["pillar"].get("minion_blackout", False): whitelist = self.opts["pillar"].get("minion_blackout_whitelist", []) @@ -2482,14 +2610,14 @@ def _execute_job_function( "saltutil.refresh_pillar allowed in blackout mode." ) - if function_name in self.functions: - func = self.functions[function_name] + if function_name in functions: + func = functions[function_name] args, kwargs = load_args_and_kwargs(func, function_args, data) else: - # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True + # only run if function_name is not in functions and allow_missing_funcs is True func = function_name args, kwargs = function_args, data - self.functions.pack["__context__"]["retcode"] = 0 + functions.pack["__context__"]["retcode"] = 0 if isinstance(executors, str): executors = [executors] @@ -2551,13 +2679,53 @@ def _thread_return(cls, minion_instance, opts, data): if f"{executor}.allow_missing_func" in minion_instance.executors ] ) + # Resolve which execution-module loader to use. For resource + # jobs we use the per-type loader so that resource-specific + # execution modules (e.g. dummyresource_test.py) take + # precedence over the managing minion's own modules. + # Unknown functions for a resource type fail loudly rather than + # silently falling through to execute on the managing minion. + resource_target = data.get("resource_target") + if resource_target: + resource_type = resource_target["type"] + functions_to_use = minion_instance.resource_loaders.get(resource_type) + if functions_to_use is None: + ret["return"] = ( + f"No resource loader available for type '{resource_type}'. " + "Ensure the resource module exists and the minion is " + "configured to manage resources of this type." 
+ ) + ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC + else: + # Set the per-call resource context via resource_ctxvar. + # contextvars are per-thread, so this value is invisible + # to other threads. LazyLoader.run() calls copy_context() + # fresh on every invocation, capturing this value in the + # snapshot before _run_as executes — fully isolated from + # concurrent resource jobs sharing the same loader object. + import salt.loader.context as _loader_ctx + + _loader_ctx.resource_ctxvar.set(resource_target) + grains_fn = f"{resource_type}.grains" + if grains_fn in minion_instance.resource_funcs: + functions_to_use.pack["__grains__"] = ( + minion_instance.resource_funcs[grains_fn]() + ) + else: + functions_to_use = minion_instance.functions if ( - function_name in minion_instance.functions - or allow_missing_funcs is True + ret.get("retcode") is None + and functions_to_use is not None + and (function_name in functions_to_use or allow_missing_funcs is True) ): try: return_data = minion_instance._execute_job_function( - function_name, function_args, executors, opts, data + function_name, + function_args, + executors, + opts, + data, + functions=functions_to_use, ) log.info( "Job %s execution finished, return_data: %s", @@ -2585,7 +2753,7 @@ def _thread_return(cls, minion_instance, opts, data): else: ret["return"] = return_data - retcode = minion_instance.functions.pack["__context__"].get( + retcode = functions_to_use.pack["__context__"].get( "retcode", salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: @@ -2643,14 +2811,10 @@ def _thread_return(cls, minion_instance, opts, data): ret["out"] = "nested" ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: - # XXX: This can ba extreemly missleading when something outside of a - # execution module call raises a TypeError. Make this it's own - # type of exception when we start validating state and - # execution argument module inputs. 
msg = "Passed invalid arguments to {}: {}\n{}".format( function_name, exc, - minion_instance.functions[function_name].__doc__ or "", + functions_to_use[function_name].__doc__ or "", ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret["return"] = msg @@ -2665,6 +2829,20 @@ def _thread_return(cls, minion_instance, opts, data): ret["return"] = f"{msg}: {traceback.format_exc()}" ret["out"] = "nested" ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC + elif resource_target: + if functions_to_use is not None: + # Resource type has a loader but function is not implemented. + # Fail loudly rather than silently falling through to the + # managing minion — the caller explicitly targeted a resource. + ret["return"] = ( + f"Function '{function_name}' is not supported for " + f"resource type '{resource_type}'. Implement it in a " + f"'{resource_type}resource_*' execution module." + ) + ret["success"] = False + ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC + ret["out"] = "nested" + # else: no-loader case already populated ret above else: docs = minion_instance.functions["sys.doc"](f"{function_name}*") if docs: @@ -2685,6 +2863,129 @@ def _thread_return(cls, minion_instance, opts, data): ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC ret["out"] = "nested" + # ------------------------------------------------------------------- + # Merge-mode: for state functions the managing minion runs each + # resource's function inline here and folds the results into its own + # state dict. The operator then sees ONE combined block + ONE Summary + # section instead of a separate block per resource. 
+ # ------------------------------------------------------------------- + if ( + not data.get("resource_target") + and data.get("fun") in cls._MERGE_RESOURCE_FUNS + and data.get("resource_targets") + and isinstance(ret.get("return"), dict) + ): + import salt.loader.context as _loader_ctx # noqa: PLC0415 + + _prefix_state_key = minion_instance._prefix_resource_state_key + + run_num_base = ( + max( + ( + v.get("__run_num__", 0) + for v in ret["return"].values() + if isinstance(v, dict) + ), + default=0, + ) + + 1 + ) + + for resource in data["resource_targets"]: + rid = resource["id"] + rtype = resource["type"] + resource_loader = getattr( + minion_instance, "resource_loaders", {} + ).get(rtype) + if resource_loader is None: + ret["return"][f"no_|-{rid}_|-{rid}_|-None"] = { + "result": False, + "comment": ( + f"No resource loader for type '{rtype}'. " + "Ensure the resource module exists." + ), + "name": rid, + "changes": {}, + "__run_num__": run_num_base, + } + run_num_base += 1 + if ret.get("retcode") == salt.defaults.exitcodes.EX_OK: + ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC + continue + + if function_name not in resource_loader: + # Function not implemented for this resource type — + # same message the separate-job path would return. + resource_return = ( + f"Function '{function_name}' is not supported for " + f"resource type '{rtype}'. Implement it in a " + f"'{rtype}resource_*' execution module." 
+ ) + else: + token = _loader_ctx.resource_ctxvar.set(resource) + try: + resource_return = minion_instance._execute_job_function( + function_name, + function_args, + executors, + opts, + data, + functions=resource_loader, + ) + except Exception as exc: # pylint: disable=broad-except + log.error( + "Inline resource execution for '%s' raised: %s", + rid, + exc, + exc_info=True, + ) + resource_return = ( + f"ERROR running {function_name} for '{rid}': {exc}" + ) + finally: + _loader_ctx.resource_ctxvar.reset(token) + + if isinstance(resource_return, dict): + for state_id, state_val in resource_return.items(): + if isinstance(state_val, dict): + entry = dict(state_val) + entry["__run_num__"] = run_num_base + else: + entry = { + "result": True, + "comment": str(state_val), + "name": f"[{rid}]", + "changes": {}, + "__run_num__": run_num_base, + } + run_num_base += 1 + ret["return"][_prefix_state_key(state_id, rid)] = entry + r_retcode = resource_loader.pack["__context__"].get( + "retcode", 0 + ) + if ( + r_retcode + and ret.get("retcode") == salt.defaults.exitcodes.EX_OK + ): + ret["retcode"] = r_retcode + else: + # String result means the resource couldn't fulfill the + # operation (e.g. "not supported" for dummy resources). + # Mark as False — the state was NOT applied, so reporting + # True would silently mask unactioned resources. 
+ ret["return"][f"no_|-{rid}_|-{rid}_|-None"] = { + "result": False, + "comment": str(resource_return), + "name": rid, + "changes": {}, + "__run_num__": run_num_base, + } + run_num_base += 1 + if ret.get("retcode") == salt.defaults.exitcodes.EX_OK: + ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC + + ret["success"] = ret.get("retcode") == salt.defaults.exitcodes.EX_OK + if isinstance(ret["return"], dict) and ret["return"].get("__no_return__"): # This is used to suppress the return for queued jobs # The job will be executed later and will return then @@ -2698,6 +2999,11 @@ def _thread_return(cls, minion_instance, opts, data): ret["jid"] = data["jid"] ret["fun"] = data["fun"] ret["fun_args"] = data["arg"] + if data.get("resource_target"): + log.info( + "resource_target in _thread_return: %s", data["resource_target"] + ) + ret["resource_id"] = data["resource_target"]["id"] if "user" in data: ret["user"] = data["user"] if "master_id" in data: @@ -3171,6 +3477,32 @@ async def _fire_master_minion_start(self): include_startup_grains=include_grains, ) + async def _register_resources_with_master(self): + """ + Send this minion's resource list to the master for registry population. + + Called on startup (and reconnect) so that the master's + ``minion_resources`` cache bank is up-to-date. This allows + :class:`salt.utils.minions.CkMinions` to include resource IDs when + expanding glob / non-compound targets (e.g. ``salt '*' test.ping``). + + An empty resource dict is sent deliberately when the minion has no + resources — this clears any stale entries left by a previous + registration (e.g. after a resource type is removed from the pillar). 
+ """ + resources = self.opts.get("resources", {}) + load = { + "cmd": "_register_resources", + "id": self.opts["id"], + "resources": resources, + "tok": self.tok, + } + try: + await self._send_req_async_main(load, timeout=self._return_retry_timer()) + log.debug("Registered resources with master: %s", list(resources.keys())) + except Exception as err: # pylint: disable=broad-except + log.warning("Unable to register resources with master: %s", err) + def module_refresh(self, force_refresh=False, notify=False): """ Refresh the functions and returners. @@ -3278,6 +3610,12 @@ async def pillar_refresh(self, force_refresh=False, clean_cache=False): ) self.opts["pillar"] = new_pillar self.functions.pack["__pillar__"] = self.opts["pillar"] + # Re-discover resources now that pillar has changed. Must + # happen *after* opts["pillar"] is updated so that + # _discover_resources sees the new resource declarations (or + # their absence when a type is removed from the pillar). + self.opts["resources"] = self._discover_resources() + await self._register_resources_with_master() finally: async_pillar.destroy() self.matchers_refresh() @@ -3497,6 +3835,9 @@ async def handle_event(self, package): _minion.beacons_refresh() elif tag.startswith("matchers_refresh"): _minion.matchers_refresh() + elif tag.startswith("resource_refresh"): + _minion.opts["resources"] = _minion._discover_resources() + _minion.io_loop.create_task(_minion._register_resources_with_master()) elif tag.startswith("manage_schedule"): _minion.manage_schedule(tag, data) elif tag.startswith("manage_beacons"): @@ -3614,6 +3955,7 @@ async def handle_event(self, package): self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) await self._fire_master_minion_start() + await self._register_resources_with_master() log.info("Minion is ready to receive requests!") # update scheduled job to run with the new master addr @@ -4070,6 +4412,7 @@ def tune_in(self, start=True): self.sync_connect_master() 
if self.connected: self.io_loop.create_task(self._fire_master_minion_start()) + self.io_loop.create_task(self._register_resources_with_master()) log.info("Minion is ready to receive requests!") # Make sure to gracefully handle SIGUSR1 @@ -4152,7 +4495,26 @@ def ping_timeout_handler(*_): async def _handle_payload(self, payload): if payload is not None and payload["enc"] == "aes": if self._target_load(payload["load"]): - await self._handle_decoded_payload(payload["load"]) + load = payload["load"] + + if load.get("minion_is_target", True): + await self._handle_decoded_payload(load) + + # For merge-mode functions (state.apply etc.) resources are + # executed inline inside _thread_return and folded into the + # managing minion's own response. Dispatching them as + # separate jobs would send duplicate responses the master is + # no longer waiting for. + if load.get("fun") not in self._MERGE_RESOURCE_FUNS: + for resource in load.get("resource_targets", []): + resource_load = dict(load) + resource_load["resource_target"] = resource + # Flag so _handle_decoded_payload_impl can bypass JID + # deduplication — resource jobs share the parent JID by + # design but are independent executions. 
+ resource_load["resource_job"] = True + await self._handle_decoded_payload(resource_load) + elif self.opts["zmq_filtering"]: # In the filtering enabled case, we'd like to know when minion sees something it shouldn't log.trace( @@ -4188,16 +4550,151 @@ def _target_load(self, load): return False if load["tgt_type"] in ("grain", "grain_pcre", "pillar"): delimiter = load.get("delimiter", DEFAULT_TARGET_DELIM) - if not match_func(load["tgt"], delimiter=delimiter): - return False - elif not match_func(load["tgt"]): - return False + minion_matches = match_func(load["tgt"], delimiter=delimiter) + else: + minion_matches = match_func(load["tgt"]) else: - if not self.matchers["glob_match.match"](load["tgt"]): - return False + minion_matches = self.matchers["glob_match.match"](load["tgt"]) + + resource_targets = self._resolve_resource_targets(load) + load["resource_targets"] = resource_targets + load["minion_is_target"] = bool( + minion_matches + ) and not self._is_pure_resource_target(load) + if not load["minion_is_target"] and not resource_targets: + return False return True + def _is_pure_resource_target(self, load): + """ + Return True when the target expression contains only T@/M@ engines with + no glob/grain/pillar/list terms that would match the minion itself. + """ + tgt = load.get("tgt", "") + tgt_type = load.get("tgt_type", "glob") + if tgt_type != "compound": + return False + words = tgt.split() if isinstance(tgt, str) else list(tgt) + opers = {"and", "or", "not", "(", ")"} + return all( + w in opers or w.startswith("T@") or w.startswith("M@") for w in words + ) + + # Functions that are internal Salt plumbing and should never be dispatched + # to managed resources. Resources don't participate in job-status queries, + # module refreshes, or other minion-only housekeeping calls. 
+ _NO_RESOURCE_FUNS = frozenset( + { + "saltutil.find_job", + "saltutil.running", + "saltutil.is_running", + "saltutil.kill_job", + "saltutil.signal_job", + "saltutil.term_job", + "saltutil.refresh_grains", + "saltutil.sync_all", + "saltutil.sync_grains", + "saltutil.sync_modules", + "sys.reload_modules", + } + ) + + # Functions where resource results are merged into the managing minion's + # own response rather than dispatched as independent jobs. This produces + # ONE combined block + Summary section per managing minion instead of + # separate blocks per resource, matching how any other minion looks. + _MERGE_RESOURCE_FUNS = frozenset( + { + "state.apply", + "state.highstate", + "state.sls", + "state.sls_id", + "state.single", + } + ) + + @staticmethod + def _prefix_resource_state_key(sid, rid): + """Re-label the ID/name components of a state result key with rid. + + Key format: {module}_|-{id}_|-{name}_|-{function} + Only comps[1] and comps[2] (id and name) are prefixed so the + highstate formatter still reads {comps[0]}.{comps[3]} correctly, + preserving ``Function: pkg.installed`` while showing ``ID: node1 curl``. + """ + parts = sid.split("_|-", 3) + if len(parts) == 4: + parts[1] = f"{rid} {parts[1]}" + parts[2] = f"{rid} {parts[2]}" + return "_|-".join(parts) + return f"no_|-{rid}_|-{rid}_|-None" + + def _resolve_resource_targets(self, load): + """ + Return the list of per-resource dicts ``{"id": ..., "type": ...}`` that + the target expression matches against ``opts["resources"]``. + + For wildcard glob targets (e.g. ``salt '*'``), returns all managed + resources so that the command also runs against resources. + For compound T@ targets, returns only the matched resources. + For specific-name glob targets (e.g. ``salt 'minion'``), grain, list, + pillar, or compound expressions with no T@ terms, returns an empty list + — the operator is targeting the minion itself, not its resources. 
+ Internal/plumbing functions (see ``_NO_RESOURCE_FUNS``) are never + dispatched to resources. + """ + resources = self.opts.get("resources", {}) + if not resources: + return [] + + if load.get("fun") in self._NO_RESOURCE_FUNS: + return [] + + tgt = load.get("tgt", "") + tgt_type = load.get("tgt_type", "glob") + + if tgt_type == "compound": + words = tgt.split() if isinstance(tgt, str) else list(tgt) + opers = {"and", "or", "not", "(", ")"} + targets = [] + for word in words: + if word in opers: + continue + if word.startswith("T@"): + pattern = word[2:] + if ":" in pattern: + rtype, rid = pattern.split(":", 1) + if not rid: + # "T@type:" with trailing colon — treat as bare type + for r in resources.get(rtype, []): + targets.append({"id": r, "type": rtype}) + elif rid in resources.get(rtype, []): + targets.append({"id": rid, "type": rtype}) + else: + for rid in resources.get(pattern, []): + targets.append({"id": rid, "type": pattern}) + return targets + + # For glob targets, only dispatch to resources when the pattern + # contains a wildcard. A bare name like ``salt 'minion' test.ping`` + # targets the minion itself; it should not implicitly run against its + # resources. ``salt '*' test.ping`` or ``salt 'web*' test.ping`` + # opts in to resource dispatch. + if ( + tgt_type == "glob" + and isinstance(tgt, str) + and not any(c in tgt for c in ("*", "?", "[")) + ): + return [] + + # Wildcard glob — dispatch to all managed resources. + all_resources = [] + for rtype, rids in resources.items(): + for rid in rids: + all_resources.append({"id": rid, "type": rtype}) + return all_resources + def destroy(self): """ Tear down the minion diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index 2c51c45e0b6e..29803f29d75b 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -79,6 +79,9 @@ # Overwriting the cmd python module makes debugging modules with pdb a bit # harder so let's do it this way instead. 
def __virtual__(): + # Yield to resource-type override modules (e.g. sshresource_cmd.py). + if __opts__.get("resource_type"): # pylint: disable=undefined-variable + return False, "cmd: not loaded in resource-type loaders" return __virtualname__ diff --git a/salt/modules/dummyresource_test.py b/salt/modules/dummyresource_test.py new file mode 100644 index 000000000000..524292939f11 --- /dev/null +++ b/salt/modules/dummyresource_test.py @@ -0,0 +1,50 @@ +""" +Provide the ``test`` execution module for the dummy resource type. + +This is the resource analogue of ``salt/modules/dummyproxy_test.py``. + +Because this module is loaded into an isolated per-type +:func:`salt.loader.resource_modules` loader (``opts["resource_type"]`` is +set to ``"dummy"`` for that loader), it takes priority over the standard +``salt/modules/test.py`` for all calls dispatched to dummy resources. + +Unlike proxy Pattern B modules that must handle *two* contexts at call time +(resource vs. managing minion), this module is **only ever invoked for +resource jobs**: the managing minion's own jobs continue to use the standard +execution modules loaded in the regular ``self.functions`` loader. +""" + +import logging + +log = logging.getLogger(__name__) + +__virtualname__ = "test" + + +def __virtual__(): + """ + Load only when this loader is scoped to the ``dummy`` resource type. + """ + if __opts__.get("resource_type") == "dummy": + return __virtualname__ + return ( + False, + "dummyresource_test: only loads in a dummy-resource-type loader.", + ) + + +def ping(): + """ + Return ``True`` if the targeted dummy resource is responsive. + + Delegates to :func:`salt.resource.dummy.ping` via ``__resource_funcs__`` + so the result reflects the actual state of the resource rather than the + managing minion. + + CLI Example: + + .. 
code-block:: bash + + salt -C 'T@dummy:dummy-01' test.ping + """ + return __resource_funcs__["dummy.ping"]() # pylint: disable=undefined-variable diff --git a/salt/modules/saltutil.py b/salt/modules/saltutil.py index 486c21d02b65..eaccc9db230e 100644 --- a/salt/modules/saltutil.py +++ b/salt/modules/saltutil.py @@ -400,6 +400,69 @@ def refresh_grains(**kwargs): return True +def refresh_resources(): + """ + Signal the minion to re-discover its managed resources from current pillar + data and re-register them with the master. + + This fires a ``resource_refresh`` event on the minion bus. The minion + handles the event by calling ``_discover_resources()`` (using the current + ``opts["pillar"]``) and then re-registering the result with the master's + ``minion_resources`` cache. + + CLI Example: + + .. code-block:: bash + + salt '*' saltutil.refresh_resources + """ + try: + return __salt__["event.fire"]({}, "resource_refresh") + except KeyError: + return False + + +def sync_resources( + saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None +): + """ + Sync custom resource-type modules from ``salt://_resources`` to the minion + and signal the minion to re-discover its managed resources from pillar data + and re-register them with the master. + + saltenv + The fileserver environment from which to sync. To sync from more than + one environment, pass a comma-separated list. + + If not passed, then all environments configured in the :ref:`top files + ` will be checked for resource modules to sync. If no top + files are found, then the ``base`` environment will be synced. + + refresh : True + If ``True``, signal the minion to re-discover its managed resources + and re-register them with the master. This refresh will be performed + even if no new resource modules are synced. Set to ``False`` to + prevent this refresh. 
+ + extmod_whitelist : None + comma-separated list of modules to sync + + extmod_blacklist : None + comma-separated list of modules to blacklist + + CLI Example: + + .. code-block:: bash + + salt '*' saltutil.sync_resources + salt '*' saltutil.sync_resources saltenv=base,dev + """ + ret = _sync("resources", saltenv, extmod_whitelist, extmod_blacklist) + if refresh: + refresh_resources() + return ret + + def sync_grains( saltenv=None, refresh=True, @@ -1208,6 +1271,9 @@ def sync_all( saltenv, False, extmod_whitelist, extmod_blacklist ) ret["matchers"] = sync_matchers(saltenv, False, extmod_whitelist, extmod_blacklist) + ret["resources"] = sync_resources( + saltenv, False, extmod_whitelist, extmod_blacklist + ) if __opts__["file_client"] == "local": ret["pillar"] = sync_pillar(saltenv, False, extmod_whitelist, extmod_blacklist) ret["wrapper"] = sync_wrapper( diff --git a/salt/modules/sshresource_cmd.py b/salt/modules/sshresource_cmd.py new file mode 100644 index 000000000000..eed91d20aa9e --- /dev/null +++ b/salt/modules/sshresource_cmd.py @@ -0,0 +1,132 @@ +""" +Execution module override for the ``ssh`` resource type. + +This module is loaded into the per-type execution-module loader whenever the +``resource_type`` in opts is ``"ssh"``. It shadows the standard +``salt.modules.cmdmod`` and ``salt.modules.test`` functions for jobs that +are dispatched to SSH resources, delegating the actual work to +:mod:`salt.resource.ssh` via ``__resource_funcs__``. + +Because this loader is **only ever used for resource jobs**, there is no need +for the call-time proxy-style guard (``if salt.utils.platform.is_proxy()``). +The managing minion's own jobs continue to use the standard execution modules +loaded in the regular ``self.functions`` loader. + +Usage +----- +Any execution module function that should behave differently when targeting +an SSH resource can be implemented here. 
Functions not defined in this +module fall through to the standard execution modules in the resource loader. + +Example +------- + +.. code-block:: bash + + # Ping an SSH resource + salt -C 'T@ssh:web-01' test.ping + + # Run a shell command on an SSH resource + salt -C 'T@ssh:web-01' cmd.run 'uptime' + salt -C 'T@ssh' cmd.run 'df -h' +""" + +import logging + +# __resource_funcs__ is injected by the per-type loader at runtime. +# pylint: disable=undefined-variable + +log = logging.getLogger(__name__) + +__virtualname__ = "cmd" + + +def __virtual__(): + """ + Load only when this execution-module loader is scoped to the ``ssh`` + resource type. + """ + if __opts__.get("resource_type") == "ssh": # pylint: disable=undefined-variable + return __virtualname__ + return False, "sshresource_cmd: only loads in an ssh-resource-type loader." + + +# --------------------------------------------------------------------------- +# cmd.* surface +# --------------------------------------------------------------------------- + + +def run( + cmd, + timeout=None, + **kwargs, +): + """ + Execute a shell command on the targeted SSH resource and return its + standard output. + + This is the SSH-resource equivalent of :func:`salt.modules.cmdmod.run`. + The command is executed directly on the remote host via the SSH Shell + transport — no Salt thin deployment required. + + :param str cmd: The shell command to run on the remote host. + :param int timeout: Optional SSH connection timeout in seconds for this + call. Overrides the per-resource ``timeout`` configured in Pillar. + :rtype: str — stdout from the remote command + + CLI Example: + + .. 
code-block:: bash + + salt -C 'T@ssh:web-01' cmd.run 'uptime' + salt -C 'T@ssh' cmd.run 'df -h' timeout=60 + """ + result = __resource_funcs__["ssh.cmd_run"]( + cmd, timeout=timeout + ) # pylint: disable=undefined-variable + return result.get("stdout", "") + + +def run_all(cmd, timeout=None, **kwargs): + """ + Execute a shell command on the targeted SSH resource and return a dict + containing ``stdout``, ``stderr``, and ``retcode``. + + This mirrors :func:`salt.modules.cmdmod.run_all` for SSH resources. + + :param str cmd: The shell command to run on the remote host. + :param int timeout: Optional SSH connection timeout in seconds for this + call. + :rtype: dict + + CLI Example: + + .. code-block:: bash + + salt -C 'T@ssh:web-01' cmd.run_all 'uptime' + """ + return __resource_funcs__["ssh.cmd_run"]( + cmd, timeout=timeout + ) # pylint: disable=undefined-variable + + +def retcode(cmd, timeout=None, **kwargs): + """ + Execute a shell command on the targeted SSH resource and return only the + exit code. + + :param str cmd: The shell command to run on the remote host. + :param int timeout: Optional SSH connection timeout in seconds for this + call. + :rtype: int + + CLI Example: + + .. code-block:: bash + + salt -C 'T@ssh:web-01' cmd.retcode 'test -f /etc/salt/minion' + """ + result = __resource_funcs__["ssh.cmd_run"]( + cmd, timeout=timeout + ) # pylint: disable=undefined-variable + return result.get("retcode", 1) diff --git a/salt/modules/sshresource_pkg.py b/salt/modules/sshresource_pkg.py new file mode 100644 index 000000000000..3415a11e3052 --- /dev/null +++ b/salt/modules/sshresource_pkg.py @@ -0,0 +1,184 @@ +""" +Execution module override for the ``ssh`` resource type — ``pkg.*`` surface. + +Implements package management against SSH resources by running the +appropriate package-manager commands on the remote host via the SSH Shell +transport. 
Mirrors the interface of ``salt.modules.aptpkg`` / +``salt.modules.yumpkg`` for the functions most commonly called by state +modules (``pkg.installed``, ``pkg.removed``, etc.). + +The managing minion detects the remote OS family from the resource grains +and dispatches to the correct package-manager command set at call time. +""" + +import logging + +# __resource_funcs__ is injected by the per-type loader at runtime. +# pylint: disable=undefined-variable + +log = logging.getLogger(__name__) + +__virtualname__ = "pkg" + + +def __virtual__(): + if __opts__.get("resource_type") == "ssh": # pylint: disable=undefined-variable + return __virtualname__ + return False, "sshresource_pkg: only loads in an ssh-resource-type loader." + + +# --------------------------------------------------------------------------- +# Internal helpers +# --------------------------------------------------------------------------- + + +def _run(cmd, timeout=None): + """Run a shell command on the remote resource, return (stdout, retcode).""" + result = __resource_funcs__["ssh.cmd_run"]( + cmd, timeout=timeout + ) # pylint: disable=undefined-variable + return result.get("stdout", ""), result.get("retcode", 1) + + +def _pkg_manager(): + """ + Return the package-manager command appropriate for the remote OS. + + Inspects the ``os_family`` grain so we can support both Debian/Ubuntu + (``apt-get``) and RedHat/CentOS (``yum`` / ``dnf``) targets. 
+ """ + grains = ( + __grains__ if isinstance(__grains__, dict) else __grains__.value() + ) # pylint: disable=undefined-variable + os_family = grains.get("os_family", "").lower() + if os_family in ("debian", "ubuntu"): + return "apt-get" + if os_family in ("redhat", "centos", "fedora", "suse"): + return "yum" + # Fallback: try apt-get then yum + return "apt-get" + + +# --------------------------------------------------------------------------- +# pkg.* surface +# --------------------------------------------------------------------------- + + +def install(name=None, pkgs=None, sources=None, **kwargs): + """ + Install one or more packages on the SSH resource. + + CLI Example: + + .. code-block:: bash + + salt -C 'T@ssh:node1' pkg.install curl + salt -C 'T@ssh:node1' pkg.install pkgs='[curl, git]' + """ + pkg_mgr = _pkg_manager() + if pkgs: + names = " ".join(pkgs if isinstance(pkgs, list) else [pkgs]) + elif name: + names = name + else: + return {} + + env = "DEBIAN_FRONTEND=noninteractive " if "apt" in pkg_mgr else "" + cmd = f"{env}{pkg_mgr} install -y {names}" + stdout, retcode = _run(cmd, timeout=kwargs.get("timeout")) + + if retcode != 0: + log.warning("pkg.install failed for %s: %s", names, stdout) + return {"result": False, "comment": stdout} + return {"result": True, "comment": stdout} + + +def remove(name=None, pkgs=None, **kwargs): + """ + Remove one or more packages from the SSH resource. + + CLI Example: + + .. 
code-block:: bash + + salt -C 'T@ssh:node1' pkg.remove curl + """ + pkg_mgr = _pkg_manager() + if pkgs: + names = " ".join(pkgs if isinstance(pkgs, list) else [pkgs]) + elif name: + names = name + else: + return {} + + cmd = f"{pkg_mgr} remove -y {names}" + stdout, retcode = _run(cmd, timeout=kwargs.get("timeout")) + + if retcode != 0: + log.warning("pkg.remove failed for %s: %s", names, stdout) + return {"result": False, "comment": stdout} + return {"result": True, "comment": stdout} + + +def version(*names, **kwargs): + """ + Return the installed version of the given package(s). + + Returns a string for a single package or a dict for multiple packages. + + CLI Example: + + .. code-block:: bash + + salt -C 'T@ssh:node1' pkg.version curl + """ + grains = ( + __grains__ if isinstance(__grains__, dict) else __grains__.value() + ) # pylint: disable=undefined-variable + os_family = grains.get("os_family", "").lower() + + versions = {} + for name in names: + if os_family in ("debian", "ubuntu"): + stdout, retcode = _run( + f"dpkg-query -W -f='${{Version}}' {name} 2>/dev/null" + ) + else: + stdout, retcode = _run( + f"rpm -q --queryformat '%{{VERSION}}' {name} 2>/dev/null" + ) + versions[name] = stdout.strip() if retcode == 0 else "" + + if len(names) == 1: + return versions[names[0]] + return versions + + +def list_pkgs(**kwargs): + """ + List all installed packages on the SSH resource. + + Returns a dict of ``{name: version}``. + + CLI Example: + + .. 
code-block:: bash + + salt -C 'T@ssh:node1' pkg.list_pkgs + """ + grains = ( + __grains__ if isinstance(__grains__, dict) else __grains__.value() + ) # pylint: disable=undefined-variable + os_family = grains.get("os_family", "").lower() + + if os_family in ("debian", "ubuntu"): + stdout, _ = _run("dpkg-query -W -f='${Package} ${Version}\\n'") + else: + stdout, _ = _run("rpm -qa --queryformat '%{NAME} %{VERSION}-%{RELEASE}\\n'") + + pkgs = {} + for line in stdout.splitlines(): + parts = line.strip().split(None, 1) + if len(parts) == 2: + pkgs[parts[0]] = parts[1] + return pkgs diff --git a/salt/modules/sshresource_state.py b/salt/modules/sshresource_state.py new file mode 100644 index 000000000000..45254871f24f --- /dev/null +++ b/salt/modules/sshresource_state.py @@ -0,0 +1,507 @@ +""" +State module for the ``ssh`` resource type. + +Implements ``state.highstate``, ``state.sls``, and ``state.apply`` for SSH +resources by replicating the salt-ssh state-execution pipeline on the +managing minion: + +1. **Compile** — ``SSHHighState`` reads state and pillar files from the + master via the minion's ``RemoteClient``. The resource ID is used as + the top-file target, so only states mapped to that ID are compiled. + +2. **Package** — ``prep_trans_tar`` bundles the compiled low state, all + referenced ``salt://`` files, and the rendered pillar into a transport + tar (``salt_state.tgz``). + +3. **Execute** — The tar is SCP'd to the remote host's ``thin_dir`` and + ``state.pkg`` is invoked via the salt-thin bundle, returning structured + JSON results. + +This mirrors what ``salt-ssh state.highstate`` does when invoked from the +master, but runs from the managing minion's process so the salt-ssh +initiator is the minion, not the master. 
+""" + +import logging +import os +import uuid + +import salt.client.ssh +import salt.client.ssh.shell +import salt.client.ssh.state +import salt.client.ssh.wrapper +import salt.defaults.exitcodes +import salt.fileclient +import salt.utils.hashutils +import salt.utils.network +import salt.utils.state +from salt.client.ssh.wrapper.state import ( + _cleanup_slsmod_low_data, + _merge_extra_filerefs, +) +from salt.resource.ssh import CONTEXT_KEY + +log = logging.getLogger(__name__) +log.info("sshresource_state: module imported, __name__=%s", __name__) + +__virtualname__ = "state" +__func_alias__ = {"apply_": "apply"} + + +def __virtual__(): + if __opts__.get("resource_type") == "ssh": # pylint: disable=undefined-variable + log.info( + "sshresource_state: LOADING for ssh resource type (opts id=%s)", + __opts__.get("id"), + ) # pylint: disable=undefined-variable + return __virtualname__ + return False, "sshresource_state: only loads in an ssh-resource-type loader." + + +# --------------------------------------------------------------------------- +# Internal helpers +# --------------------------------------------------------------------------- + + +def _resource_id(): + return __resource__["id"] # pylint: disable=undefined-variable + + +def _host_cfg(): + resource_id = _resource_id() + return __context__[CONTEXT_KEY]["hosts"].get( + resource_id, {} + ) # pylint: disable=undefined-variable + + +def _relenv_path(): + """ + Return the path to a pre-built relenv tarball if one exists locally, otherwise + return ``None`` to let ``Single.__init__`` detect the remote arch and download + the correct tarball itself. + + The tarball is generated by ``salt.utils.relenv.gen_relenv``, which normalises + the arch to ``x86_64`` or ``arm64`` before building the cache path, so the + lookup uses those canonical names. 
+ """ + cachedir = __opts__.get("cachedir", "") # pylint: disable=undefined-variable + for arch in ("x86_64", "arm64"): + path = os.path.join(cachedir, "relenv", "linux", arch, "salt-relenv.tar.xz") + if os.path.exists(path): + return path + return None + + +def _target_opts(): + """ + Build a copy of ``__opts__`` suitable for ``SSHHighState`` and ``Single``. + + * Sets ``id`` to the resource ID so the top file matches the right host. + * Injects ``_ssh_version`` and host-key policy from the resource config. + * ``thin_dir`` is populated later as a side-effect of ``Single.__init__``. + """ + resource_id = _resource_id() + cfg = _host_cfg() + opts = dict(__opts__) # pylint: disable=undefined-variable + opts["id"] = resource_id + opts.pop("resource_type", None) + opts["_ssh_version"] = ( + __context__.get(CONTEXT_KEY, {}).get( + "_ssh_version" + ) # pylint: disable=undefined-variable + or salt.client.ssh.ssh_version() + ) + opts["no_host_keys"] = cfg.get("no_host_keys", opts.get("no_host_keys", False)) + opts["ignore_host_keys"] = cfg.get( + "ignore_host_keys", opts.get("ignore_host_keys", False) + ) + if "known_hosts_file" in cfg: + opts["known_hosts_file"] = cfg["known_hosts_file"] + opts["relenv"] = True + return opts + + +def _connection_kwargs(): + """Return SSH connection kwargs for ``Single`` from the resource config.""" + cfg = _host_cfg() + return { + "host": cfg["host"], + "user": cfg.get("user", "root"), + "port": cfg.get("port", 22), + "passwd": cfg.get("passwd"), + "priv": cfg.get("priv"), + "priv_passwd": cfg.get("priv_passwd"), + "timeout": cfg.get("timeout", 60), + "sudo": cfg.get("sudo", False), + "tty": cfg.get("tty", False), + "identities_only": cfg.get("identities_only", False), + "ssh_options": cfg.get("ssh_options"), + "keepalive": cfg.get("keepalive", True), + "keepalive_interval": cfg.get("keepalive_interval", 60), + "keepalive_count_max": cfg.get("keepalive_count_max", 3), + } + + +def _thin_dir(): + """ + Return the remote working directory 
for the salt-thin bundle. + + Mirrors the logic in ``salt.resource.ssh._thin_dir``: uses the per-host + ``thin_dir`` config key when set, otherwise builds a path under ``/tmp/`` + (always world-writable) to avoid ``/var/tmp/`` which may be root-only. + """ + cfg = _host_cfg() + if "thin_dir" in cfg: + return cfg["thin_dir"] + fqdn_uuid = uuid.uuid3(uuid.NAMESPACE_DNS, salt.utils.network.get_fqhostname()).hex[ + :6 + ] + return "/tmp/.{}_{}_salt".format(cfg.get("user", "root"), fqdn_uuid) + + +def _seed_thin_dir(opts): + """ + Compute ``thin_dir`` and write it into *opts* so that ``SSHHighState`` + and ``prep_trans_tar`` use a consistent, writable path. + """ + thin = _thin_dir() + opts["thin_dir"] = thin + return thin + + +def _get_initial_pillar(opts): + """ + Return the managing minion's rendered pillar for state compilation. + + Passing a non-None, non-empty value as ``initial_pillar`` to ``SSHHighState`` + causes ``State.__init__`` to skip ``_gather_pillar()`` (which would otherwise + try to compile pillar for the resource ID as a regular minion). We use the + managing minion's own pillar — it contains the resource configuration anyway + and avoids a spurious pillar-compile for an unknown minion ID. + + Returns ``None`` only as a last resort so the caller can decide how to handle + missing pillar. + """ + raw = __opts__.get("pillar") # pylint: disable=undefined-variable + if raw is None: + return None + try: + val = raw.value() + except AttributeError: + val = raw + # An empty dict is falsy in state.py's `if initial_pillar` check, which + # would re-trigger _gather_pillar. Return None explicitly so callers know + # there is no cached pillar rather than silently skipping the right path. + return val if isinstance(val, dict) and val else None + + +def _file_client(): + """ + Return a file client suitable for ``SSHHighState`` state compilation. 
+ + Uses the master opts cached during ``ssh.init()`` to create an + ``FSClient`` — a local-filesystem file client identical to the one the + salt-ssh master uses. This avoids creating a new authenticated network + channel from inside a minion job thread (which has tornado IO-loop + complications). + + Falls back to a ``RemoteClient`` if no cached master opts are available + (e.g. on first run before a full restart). + """ + master_opts = __context__.get(CONTEXT_KEY, {}).get( + "master_opts" + ) # pylint: disable=undefined-variable + log.debug( + "sshresource_state._file_client: master_opts cached=%s, file_roots=%s", + master_opts is not None, + (master_opts or {}).get("file_roots"), + ) + if master_opts: + mo = dict(master_opts) + mo.setdefault( + "cachedir", __opts__.get("cachedir", "") + ) # pylint: disable=undefined-variable + return salt.fileclient.FSClient(mo) + log.warning( + "sshresource_state: no cached master opts in context, " + "falling back to RemoteClient for file access" + ) + return salt.fileclient.get_file_client( + __opts__ + ) # pylint: disable=undefined-variable + + +# --------------------------------------------------------------------------- +# Public state functions +# --------------------------------------------------------------------------- + + +def highstate(test=None, **kwargs): + """ + Apply the highstate to the targeted SSH resource. + + Compiles the highstate on the managing minion using the resource ID as the + top-file target, packages all state files into a transport tar, SCPs the + tar to the remote host, and runs ``state.pkg`` via the salt-thin bundle. + + CLI Example: + + .. 
code-block:: bash + + salt -C 'T@ssh:node1' state.highstate + salt -C 'T@ssh:node1' state.highstate test=True + """ + opts = _target_opts() + _seed_thin_dir(opts) + + initial_pillar = _get_initial_pillar(opts) + pillar_override = kwargs.get("pillar") + extra_filerefs = kwargs.get("extra_filerefs", "") + + opts = salt.utils.state.get_sls_opts(opts, **kwargs) + if test is None: + test = opts.get("test", False) + opts["test"] = test + + file_client = _file_client() + log.debug( + "sshresource_state.highstate: file_client=%s initial_pillar_type=%s", + type(file_client).__name__, + type(initial_pillar).__name__, + ) + log.debug( + "sshresource_state.highstate: file_client.envs()=%s", + file_client.envs(), + ) + # SSHHighState.__exit__ calls file_client.destroy(), so no separate finally needed. + with salt.client.ssh.state.SSHHighState( + opts, + pillar_override, + __salt__, # pylint: disable=undefined-variable + file_client, + initial_pillar=initial_pillar, + ) as st_: + try: + pillar = st_.opts["pillar"].value() + except AttributeError: + pillar = st_.opts["pillar"] + + st_.push_active() + chunks_or_errors = st_.compile_low_chunks() + log.debug( + "sshresource_state.highstate: compile_low_chunks returned %s", + chunks_or_errors, + ) + + for chunk in chunks_or_errors: + if not isinstance(chunk, dict): + return chunks_or_errors + + if not chunks_or_errors: + # Top file has no match for this resource ID — no SSH round-trip needed. + # Return a state dict using the same key format salt uses for a regular + # minion's "No Top file" entry so the merged output is consistent. + rid = _resource_id() + return { + "no_|-states_|-states_|-None": { + "result": False, + "comment": ( + f"No Top file or master_tops data matches found for" + f" resource '{rid}'." 
+ ), + "name": "states", + "changes": {}, + "__run_num__": 0, + } + } + + file_refs = salt.client.ssh.state.lowstate_file_refs( + chunks_or_errors, + _merge_extra_filerefs( + extra_filerefs, + opts.get("extra_filerefs", ""), + ), + ) + _cleanup_slsmod_low_data(chunks_or_errors) + trans_tar = salt.client.ssh.state.prep_trans_tar( + file_client, + chunks_or_errors, + file_refs, + pillar, + _resource_id(), + ) + + return _exec_state_pkg(opts, trans_tar, test) + + +def sls(mods, saltenv="base", test=None, **kwargs): + """ + Apply one or more state SLS files to the targeted SSH resource. + + CLI Example: + + .. code-block:: bash + + salt -C 'T@ssh:node1' state.sls node1 + salt -C 'T@ssh:node1' state.sls node1,common test=True + """ + opts = _target_opts() + _seed_thin_dir(opts) + + initial_pillar = _get_initial_pillar(opts) + pillar_override = kwargs.get("pillar") + extra_filerefs = kwargs.get("extra_filerefs", "") + + opts = salt.utils.state.get_sls_opts(opts, **kwargs) + if test is None: + test = opts.get("test", False) + opts["test"] = test + + if isinstance(mods, str): + mods = [m.strip() for m in mods.split(",") if m.strip()] + + file_client = _file_client() + with salt.client.ssh.state.SSHHighState( + opts, + pillar_override, + __salt__, # pylint: disable=undefined-variable + file_client, + initial_pillar=initial_pillar, + ) as st_: + try: + pillar = st_.opts["pillar"].value() + except AttributeError: + pillar = st_.opts["pillar"] + + st_.push_active() + high_data, errors = st_.render_highstate({saltenv: mods}) + if kwargs.get("exclude"): + exclude = kwargs["exclude"] + if isinstance(exclude, str): + exclude = exclude.split(",") + high_data.setdefault("__exclude__", []).extend(exclude) + + high_data, ext_errors = st_.state.reconcile_extend(high_data) + errors += ext_errors + errors += st_.state.verify_high(high_data) + if errors: + return errors + + high_data, req_in_errors = st_.state.requisite_in(high_data) + errors += req_in_errors + high_data = 
st_.state.apply_exclude(high_data) + if errors: + return errors + + chunks, errors = st_.state.compile_high_data(high_data) + if errors: + return errors + + file_refs = salt.client.ssh.state.lowstate_file_refs( + chunks, + _merge_extra_filerefs( + extra_filerefs, + opts.get("extra_filerefs", ""), + ), + ) + _cleanup_slsmod_low_data(chunks) + trans_tar = salt.client.ssh.state.prep_trans_tar( + file_client, + chunks, + file_refs, + pillar, + _resource_id(), + ) + + return _exec_state_pkg(opts, trans_tar, test) + + +def apply_(mods=None, **kwargs): + """ + Apply states to the SSH resource — ``state.highstate`` if no mods are + given, ``state.sls`` otherwise. + + CLI Example: + + .. code-block:: bash + + salt -C 'T@ssh:node1' state.apply + salt -C 'T@ssh:node1' state.apply node1 + """ + if mods: + return sls(mods, **kwargs) + return highstate(**kwargs) + + +# --------------------------------------------------------------------------- +# Shared execution helper +# --------------------------------------------------------------------------- + + +def _exec_state_pkg(opts, trans_tar, test): + """ + SCP ``trans_tar`` to the remote host and run ``state.pkg`` via the + salt-thin bundle. Cleans up the local tar file regardless of outcome. + + Returns the state result dict directly (what the minion dispatcher + expects) rather than the full ``{"local": {"return": ...}}`` envelope. + + A fresh file client is created here so that ``Single.cmd_block()`` can call + ``mod_data(fsclient)`` to scan for extension modules. (``cmd_block`` was + updated in the relenv improvements merge to regenerate ext-mods before every + remote execution.) 
+ """ + fsclient = _file_client() + try: + trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, opts["hash_type"]) + single = salt.client.ssh.Single( + opts, + "state.pkg", # placeholder; argv is updated after __init__ rewrites thin_dir + _resource_id(), + thin=_relenv_path(), + thin_dir=opts["thin_dir"], + fsclient=fsclient, + **_connection_kwargs(), + ) + # Single.__init__ may rename thin_dir (e.g. _salt → _salt_relenv) and + # writes the result back into opts["thin_dir"]. Build the real argv only now. + cmd = "state.pkg {thin_dir}/salt_state.tgz test={test} pkg_sum={pkg_sum} hash_type={hash_type}".format( + thin_dir=opts["thin_dir"], + test=test, + pkg_sum=trans_tar_sum, + hash_type=opts["hash_type"], + ) + single.argv = [cmd] + single.shell.send(trans_tar, "{}/salt_state.tgz".format(opts["thin_dir"])) + stdout, stderr, retcode = single.cmd_block() + finally: + try: + os.remove(trans_tar) + except OSError: + pass + + # parse_ret raises SSHCommandExecutionError on any non-zero retcode, even + # when the remote ran states and produced a valid result dict (e.g. some + # states failed → retcode 2). Catch that case and surface the result dict + # normally so operators see the full state tree rather than raw JSON. 
+ try: + envelope = salt.client.ssh.wrapper.parse_ret(stdout, stderr, retcode) + except salt.client.ssh.wrapper.SSHCommandExecutionError as exc: + local = (exc.parsed or {}).get("local", {}) + if isinstance(local.get("return"), dict): + ret = local["return"] + __context__["retcode"] = local.get( # pylint: disable=undefined-variable + "retcode", salt.defaults.exitcodes.EX_STATE_FAILURE + ) + return ret + raise + + if isinstance(envelope, dict) and "return" in envelope: + ret = envelope["return"] + remote_retcode = envelope.get("retcode", 0) + if remote_retcode: + __context__["retcode"] = ( # pylint: disable=undefined-variable + remote_retcode + ) + return ret + return envelope diff --git a/salt/modules/sshresource_test.py b/salt/modules/sshresource_test.py new file mode 100644 index 000000000000..bd1c3d43a363 --- /dev/null +++ b/salt/modules/sshresource_test.py @@ -0,0 +1,45 @@ +""" +Provide the ``test`` execution module for the ``ssh`` resource type. + +This is the SSH-resource analogue of ``salt/modules/dummyresource_test.py``. +It is loaded into the per-type execution-module loader when +``opts["resource_type"]`` is ``"ssh"``, causing it to shadow the standard +``salt.modules.test`` for all jobs dispatched to SSH resources. + +The managing minion's own jobs continue to use the standard ``test`` module +loaded in the regular ``self.functions`` loader — this module is never +invoked for managing-minion jobs. +""" + +import logging + +log = logging.getLogger(__name__) + +__virtualname__ = "test" + + +def __virtual__(): + """ + Load only when this loader is scoped to the ``ssh`` resource type. + """ + if __opts__.get("resource_type") == "ssh": # pylint: disable=undefined-variable + return __virtualname__ + return False, "sshresource_test: only loads in an ssh-resource-type loader." + + +def ping(): + """ + Return ``True`` if the targeted SSH resource is reachable. 
+ + Delegates to :func:`salt.resource.ssh.ping` via ``__resource_funcs__`` + so the result reflects actual SSH connectivity to the remote host rather + than the liveness of the managing minion. + + CLI Example: + + .. code-block:: bash + + salt -C 'T@ssh:web-01' test.ping + salt -C 'T@ssh' test.ping + """ + return __resource_funcs__["ssh.ping"]() # pylint: disable=undefined-variable diff --git a/salt/modules/state.py b/salt/modules/state.py index 7cce10ad7c29..136458dec929 100644 --- a/salt/modules/state.py +++ b/salt/modules/state.py @@ -72,6 +72,12 @@ def __virtual__(): """ Set the virtualname """ + # Resource-type loaders (resource_modules) use per-type override modules + # such as sshresource_state.py. Returning False here yields the "state" + # virtualname slot to those overrides so they are dispatched correctly + # when jobs are targeted at resources. + if __opts__.get("resource_type"): # pylint: disable=undefined-variable + return False, "state: not loaded in resource-type loaders" # Update global namespace with functions that are cloned in this module global _orchestrate _orchestrate = salt.utils.functools.namespaced_function(_orchestrate, globals()) diff --git a/salt/modules/test.py b/salt/modules/test.py index 3cd9d7d5ea43..71e5bde8e018 100644 --- a/salt/modules/test.py +++ b/salt/modules/test.py @@ -33,6 +33,13 @@ log = logging.getLogger(__name__) +def __virtual__(): + # Yield to resource-type override modules (e.g. sshresource_test.py). 
+ if __opts__.get("resource_type"): # pylint: disable=undefined-variable + return False, "test: not loaded in resource-type loaders" + return True + + @depends("non_existantmodulename") def missing_func(): return "foo" diff --git a/salt/resource/__init__.py b/salt/resource/__init__.py new file mode 100644 index 000000000000..57971937bd90 --- /dev/null +++ b/salt/resource/__init__.py @@ -0,0 +1,3 @@ +""" +salt.resource package +""" diff --git a/salt/resource/dummy.py b/salt/resource/dummy.py new file mode 100644 index 000000000000..4a20f03cbddb --- /dev/null +++ b/salt/resource/dummy.py @@ -0,0 +1,369 @@ +""" +Dummy resource module for testing the Salt resource subsystem. + +This module implements the ``dummy`` resource type. It is the resource +analogue of ``salt.proxy.dummy`` — a self-contained, file-backed +implementation that exercises the full resource lifecycle without requiring +any real managed devices. + +Unlike a proxy module, a resource module is loaded **once per resource type +per minion**. A single instance of this module handles all ``dummy`` +resources managed by the minion. The current resource context is conveyed +via the ``__resource__`` dunder rather than as a function parameter, keeping +the interface consistent with all other Salt module systems. + +Configuration (via Pillar):: + + resources: + dummy: + resource_ids: + - dummy-01 + - dummy-02 +""" + +import copy +import logging +import os +import pprint +from contextlib import contextmanager + +import salt.utils.files +import salt.utils.msgpack +import salt.utils.resources + +log = logging.getLogger(__name__) + + +def __virtual__(): + """ + Always available — no external dependencies required. 
+ """ + log.debug("dummy resource __virtual__() called...") + return True + + +# --------------------------------------------------------------------------- +# Internal helpers +# --------------------------------------------------------------------------- + + +def _resource_id(): + """ + Return the ID of the resource currently being operated on. + + The execution layer sets ``__resource__`` before every per-resource + dispatch. All per-resource functions call this rather than accepting + an ID parameter. + """ + return __resource__["id"] # pylint: disable=undefined-variable + + +def _initial_state(resource_id): + return { + "id": resource_id, + "services": {"apache": "running", "ntp": "running", "samba": "stopped"}, + "packages": { + "coreutils": "1.0", + "apache": "2.4", + "tinc": "1.4", + "redbull": "999.99", + }, + } + + +def _save_state(opts, resource_id, details): + cachefile = os.path.join(opts["cachedir"], f"dummy-resource-{resource_id}.cache") + with salt.utils.files.fopen(cachefile, "wb") as pck: + pck.write(salt.utils.msgpack.packb(details, use_bin_type=True)) + log.warning( + "Dummy Resource Saved State(%s):\n%s", cachefile, pprint.pformat(details) + ) + + +def _load_state(opts, resource_id): + cachefile = os.path.join(opts["cachedir"], f"dummy-resource-{resource_id}.cache") + try: + with salt.utils.files.fopen(cachefile, "rb") as pck: + state = salt.utils.msgpack.unpackb(pck.read(), raw=False) + except FileNotFoundError: + state = _initial_state(resource_id) + _save_state(opts, resource_id, state) + except Exception as exc: # pylint: disable=broad-except + log.exception("Failed to load state: %s", exc, exc_info=True) + state = _initial_state(resource_id) + _save_state(opts, resource_id, state) + log.warning( + "Dummy Resource Loaded State(%s):\n%s", cachefile, pprint.pformat(state) + ) + return state + + +@contextmanager +def _loaded_state(opts, resource_id): + state = _load_state(opts, resource_id) + original = copy.deepcopy(state) + try: + yield state 
+ finally: + if state != original: + _save_state(opts, resource_id, state) + + +# --------------------------------------------------------------------------- +# Required resource interface +# --------------------------------------------------------------------------- + + +def init(opts): + """ + Initialize the dummy resource type for this minion. + + Called once when the resource type is loaded, before any per-resource + operations are performed. Reads the resource type configuration from the + ``dummy`` entry under the pillar subtree selected by ``resource_pillar_key`` + (see :func:`salt.utils.resources.pillar_resources_tree`) and sets up shared type-level + state in ``__context__["dummy_resource"]``. + + :param dict opts: The Salt opts dict. + """ + resource_ids = ( + salt.utils.resources.pillar_resources_tree(opts) + .get("dummy", {}) + .get("resource_ids", []) + ) + __context__["dummy_resource"] = { + "initialized": True, + "resource_ids": resource_ids, + } + log.debug("dummy resource init() called, managing: %s", resource_ids) + + +def initialized(): + """ + Return ``True`` if ``init()`` has been called successfully for this + resource type. + + Checked by the loader before dispatching per-resource operations, in the + same way ``salt.proxy.dummy.initialized()`` is used today. + + :rtype: bool + """ + return __context__.get("dummy_resource", {}).get("initialized", False) + + +def discover(opts): + """ + Return the list of resource IDs of type ``dummy`` that this minion + manages. + + Called by ``saltutil.refresh_resources`` to populate the master's + Resource Registry. For the dummy module the list of IDs is read from + ``resource_ids`` under the ``dummy`` type in the configured resource pillar + subtree. + + Returns a list of bare resource IDs (not full SRNs) — e.g. + ``["dummy-01", "dummy-02"]``. + + :param dict opts: The Salt opts dict. 
+ :rtype: list[str] + """ + resource_ids = ( + salt.utils.resources.pillar_resources_tree(opts) + .get("dummy", {}) + .get("resource_ids", []) + ) + log.debug("dummy resource discover() returning: %s", resource_ids) + return resource_ids + + +def grains(): + """ + Return the grains dict for the current resource. + + The current resource context is available via ``__resource__``. Each + dummy resource reports a small set of static grains for use in targeting + and state execution. + + :rtype: dict + """ + resource_id = _resource_id() + with _loaded_state( + __opts__, resource_id + ) as state: # pylint: disable=undefined-variable + state["grains_cache"] = { # pylint: disable=unsupported-assignment-operation + "dummy_grain_1": "one", + "dummy_grain_2": "two", + "dummy_grain_3": "three", + "resource_id": resource_id, + } + return state["grains_cache"] + + +def grains_refresh(): + """ + Invalidate the cached grains for the current resource and return a + freshly generated grains dict. + + :rtype: dict + """ + resource_id = _resource_id() + with _loaded_state( + __opts__, resource_id + ) as state: # pylint: disable=undefined-variable + state.pop("grains_cache", None) + return grains() + + +def ping(): + """ + Return ``True`` if the current resource is reachable and responsive. + + For the dummy module this always returns ``True``; no real connection + is made. + """ + resource_id = _resource_id() + log.debug("dummy resource ping() called for %s", resource_id) + return True + + +def shutdown(opts): + """ + Tear down the dummy resource type. + + Called when the minion shuts down or the resource type is unloaded. + Cleans up shared type-level state from ``__context__``. + + :param dict opts: The Salt opts dict. 
+ """ + log.debug("dummy resource shutdown() called...") + __context__.pop("dummy_resource", None) + + +# --------------------------------------------------------------------------- +# Per-resource operations (mirrors salt.proxy.dummy for testing parity) +# --------------------------------------------------------------------------- + + +def service_start(name): + """ + Start a "service" on the current dummy resource. + """ + with _loaded_state( + __opts__, _resource_id() + ) as state: # pylint: disable=undefined-variable + state["services"][name] = "running" + return "running" + + +def service_stop(name): + """ + Stop a "service" on the current dummy resource. + """ + with _loaded_state( + __opts__, _resource_id() + ) as state: # pylint: disable=undefined-variable + state["services"][name] = "stopped" + return "stopped" + + +def service_restart(name): + """ + Restart a "service" on the current dummy resource. + """ + return True + + +def service_list(): + """ + List "services" on the current dummy resource. + """ + with _loaded_state( + __opts__, _resource_id() + ) as state: # pylint: disable=undefined-variable + return list(state["services"]) + + +def service_status(name): + """ + Return the status of a service on the current dummy resource. + """ + with _loaded_state( + __opts__, _resource_id() + ) as state: # pylint: disable=undefined-variable + if state["services"][name] == "running": + return {"comment": "running"} + return {"comment": "stopped"} + + +def package_list(): + """ + List "packages" installed on the current dummy resource. + """ + with _loaded_state( + __opts__, _resource_id() + ) as state: # pylint: disable=undefined-variable + return state["packages"] + + +def package_install(name, **kwargs): + """ + Install a "package" on the current dummy resource. 
+ """ + version = kwargs.get("version", "1.0") + with _loaded_state( + __opts__, _resource_id() + ) as state: # pylint: disable=undefined-variable + state["packages"][name] = version + return {name: version} + + +def package_remove(name): + """ + Remove a "package" from the current dummy resource. + """ + with _loaded_state( + __opts__, _resource_id() + ) as state: # pylint: disable=undefined-variable + state["packages"].pop(name) + return state["packages"] + + +def package_status(name): + """ + Return the installation status of a package on the current dummy resource. + """ + with _loaded_state( + __opts__, _resource_id() + ) as state: # pylint: disable=undefined-variable + if name in state["packages"]: + return {name: state["packages"][name]} + + +def upgrade(): + """ + "Upgrade" all packages on the current dummy resource. + """ + with _loaded_state( + __opts__, _resource_id() + ) as state: # pylint: disable=undefined-variable + for pkg in state["packages"]: + state["packages"][pkg] = str(float(state["packages"][pkg]) + 1.0) + return state["packages"] + + +def uptodate(): + """ + Report whether packages on the current dummy resource are up to date. + """ + with _loaded_state( + __opts__, _resource_id() + ) as state: # pylint: disable=undefined-variable + return state["packages"] + + +def test_from_state(): + """ + Test function so we have something to call from a state. + """ + log.debug("test_from_state called for resource %s", _resource_id()) + return "testvalue" diff --git a/salt/resource/ssh.py b/salt/resource/ssh.py new file mode 100644 index 000000000000..6d48681a5548 --- /dev/null +++ b/salt/resource/ssh.py @@ -0,0 +1,521 @@ +""" +SSH resource module — exposes remote Linux/Unix machines as Salt Resources +using the salt-ssh Shell transport layer. + +Each ``ssh`` resource maps to one remote host reachable via SSH. 
Because +resources share a single loader per type, a minion managing 500 SSH hosts +uses one loader rather than 500 proxy processes, each with its own key pair. + +This module uses :class:`salt.client.ssh.shell.Shell` for raw command +execution (``cmd_run``, ``ping``) and :class:`salt.client.ssh.Single` with +the salt-thin bundle for grain collection (``grains.items``), giving the same +complete, accurate grain set that ``salt-ssh`` provides. + +Configuration (via Pillar; top-level key defaults to ``resources``, overridable +with minion option ``resource_pillar_key``):: + +    resources: +      ssh: +        hosts: +          web-01: +            host: 192.168.1.10 +            user: root +            priv: /etc/salt/ssh_keys/web-01 +          web-02: +            host: 192.168.1.11 +            user: admin +            passwd: secretpassword +            no_host_keys: true + +Per-host connection parameters: + +``host`` +    Hostname or IP address of the remote machine (required). +``user`` +    SSH login user (default: ``root``). +``port`` +    SSH port (default: ``22``). +``priv`` +    Path to the SSH private key file. Takes precedence over ``passwd``: +    both may be specified, but when ``priv`` is set Salt uses key-based +    option strings even if ``passwd`` is also set. +``passwd`` +    SSH password. Prefer key-based authentication for production. +``priv_passwd`` +    Passphrase protecting the private key. +``sudo`` +    Run commands as root via sudo (default: ``False``). +``timeout`` +    SSH connection timeout in seconds (default: ``30``). +``identities_only`` +    Pass ``-o IdentitiesOnly=yes`` to prevent the SSH agent from offering +    unrelated keys (default: ``False``). +``no_host_keys`` +    Disable host key checking entirely — sets both +    ``StrictHostKeyChecking=no`` and ``UserKnownHostsFile=/dev/null`` +    (default: ``False``). +``ignore_host_keys`` +    Pass ``-o StrictHostKeyChecking=no`` without discarding the +    known-hosts database (default: ``False``). +``known_hosts_file`` +    Path to a custom ``known_hosts`` file for this host.
+``ssh_options`` + List of additional ``-o Key=Value`` options passed verbatim to the + ``ssh`` binary. +``keepalive`` + Enable TCP keepalives (default: ``True``). +``keepalive_interval`` + ``ServerAliveInterval`` in seconds (default: from Salt opts or ``60``). +``keepalive_count_max`` + ``ServerAliveCountMax`` (default: from Salt opts or ``3``). +""" + +import logging +import os +import uuid + +import salt.client.ssh +import salt.client.ssh.shell +import salt.config +import salt.fileclient +import salt.utils.json +import salt.utils.network +import salt.utils.path +import salt.utils.resources + +log = logging.getLogger(__name__) + +CONTEXT_KEY = "ssh_resource" + + +# --------------------------------------------------------------------------- +# Module availability +# --------------------------------------------------------------------------- + + +def __virtual__(): + """ + Only load when the ``ssh`` binary is present on the minion's PATH. + """ + if not salt.utils.path.which("ssh"): + return False, "ssh binary not found on PATH" + return True + + +# --------------------------------------------------------------------------- +# Internal helpers +# --------------------------------------------------------------------------- + + +def _resource_id(): + """Return the ID of the resource currently being operated on.""" + return __resource__["id"] # pylint: disable=undefined-variable + + +def _host_cfg(resource_id): + """Return the Pillar-sourced connection config dict for *resource_id*.""" + return __context__[CONTEXT_KEY]["hosts"].get( + resource_id, {} + ) # pylint: disable=undefined-variable + + +def _shell_opts(cfg): + """ + Build a merged opts dict for :class:`~salt.client.ssh.shell.Shell`. + + ``Shell`` reads ``ignore_host_keys``, ``no_host_keys``, + ``known_hosts_file``, and ``_ssh_version`` out of its opts dict rather + than out of constructor kwargs. 
This helper layers per-host overrides on + top of ``__opts__`` so each Shell instance honours its resource's config. + """ + merged = dict(__opts__) # pylint: disable=undefined-variable + for key in ("ignore_host_keys", "no_host_keys", "known_hosts_file"): + if key in cfg: + merged[key] = cfg[key] + # Ensure _ssh_version is always present. _passwd_opts() accesses it via + # [] without a default and would raise KeyError without this guard. + if "_ssh_version" not in merged: + cached = __context__.get(CONTEXT_KEY, {}).get( + "_ssh_version" + ) # pylint: disable=undefined-variable + merged["_ssh_version"] = ( + cached if cached is not None else salt.client.ssh.ssh_version() + ) + return merged + + +def _make_shell(resource_id, cfg_override=None): + """ + Return a :class:`~salt.client.ssh.shell.Shell` instance for *resource_id*. + + :param str resource_id: The bare resource ID. + :param dict cfg_override: Optional dict of per-call overrides (e.g. + ``{"timeout": 5}``). Values are layered on top of the stored host + config; the stored config is not mutated. + """ + cfg = _host_cfg(resource_id) + if cfg_override: + cfg = dict(cfg) + cfg.update(cfg_override) + + return salt.client.ssh.shell.Shell( + _shell_opts(cfg), + host=cfg["host"], + user=cfg.get("user", "root"), + port=cfg.get("port", 22), + passwd=cfg.get("passwd"), + priv=cfg.get("priv"), + priv_passwd=cfg.get("priv_passwd"), + timeout=cfg.get("timeout", 30), + sudo=cfg.get("sudo", False), + tty=cfg.get("tty", False), + identities_only=cfg.get("identities_only", False), + ssh_options=cfg.get("ssh_options"), + keepalive=cfg.get("keepalive", True), + keepalive_interval=cfg.get("keepalive_interval", 60), + keepalive_count_max=cfg.get("keepalive_count_max", 3), + ) + + +def _thin_dir(cfg): + """ + Return the remote working directory for the salt-thin bundle. + + Uses the per-host ``thin_dir`` config key when provided. 
Otherwise + computes a path under ``/tmp/`` (always world-writable) using the same + ``.__salt`` naming convention as Salt's DEFAULT_THIN_DIR, + but avoiding ``/var/tmp/`` which may be root-only on some systems. + """ + if "thin_dir" in cfg: + return cfg["thin_dir"] + fqdn_uuid = uuid.uuid3(uuid.NAMESPACE_DNS, salt.utils.network.get_fqhostname()).hex[ + :6 + ] + return "/tmp/.{}_{}_salt".format(cfg.get("user", "root"), fqdn_uuid) + + +def _relenv_path(): + """ + Return the path to a pre-built relenv tarball if one exists locally, otherwise + ``None`` so ``Single.__init__`` can detect the remote arch and fetch the right + tarball (same strategy as :func:`salt.modules.sshresource_state._relenv_path`). + + Pre-resolving an existing local path avoids an extra SSH round-trip during + ``Single`` construction when ``Single`` was instantiated inside a minion job + worker (where ``detect_os_arch()`` hung or added latency). + """ + cachedir = __opts__.get("cachedir", "") # pylint: disable=undefined-variable + for arch in ("x86_64", "arm64"): + path = os.path.join(cachedir, "relenv", "linux", arch, "salt-relenv.tar.xz") + if os.path.exists(path): + return path + return None + + +def _file_client(): + """ + Return a file client for ``Single.cmd_block()`` to use when regenerating + extension modules via ``mod_data(fsclient)``. + + Uses the master opts cached during :func:`init` to build an ``FSClient`` + (local-filesystem, no network channel) — the same approach used by + ``sshresource_state._file_client()``. Falls back to a ``RemoteClient`` + when no cached master opts are available. 
+ """ + master_opts = __context__.get( + CONTEXT_KEY, {} + ).get( # pylint: disable=undefined-variable + "master_opts" + ) + if master_opts: + mo = dict(master_opts) + mo.setdefault( + "cachedir", __opts__.get("cachedir", "") + ) # pylint: disable=undefined-variable + return salt.fileclient.FSClient(mo) + log.warning( + "ssh resource: no cached master opts in context, " + "falling back to RemoteClient for fsclient" + ) + return salt.fileclient.get_file_client( + __opts__ + ) # pylint: disable=undefined-variable + + +def _make_single(resource_id, argv): + """ + Return a :class:`~salt.client.ssh.Single` instance for *resource_id* + configured to run *argv* via the salt-thin bundle. + + We call :meth:`~salt.client.ssh.Single.cmd_block` directly rather than + :meth:`~salt.client.ssh.Single.run` to stay on the thin-bundle code path + and avoid the wrapper-function path that requires a master file client. + """ + cfg = _host_cfg(resource_id) + ctx = __context__.get(CONTEXT_KEY, {}) # pylint: disable=undefined-variable + + single_opts = dict(__opts__) # pylint: disable=undefined-variable + single_opts["no_host_keys"] = cfg.get( + "no_host_keys", single_opts.get("no_host_keys", False) + ) + single_opts["ignore_host_keys"] = cfg.get( + "ignore_host_keys", single_opts.get("ignore_host_keys", False) + ) + if "known_hosts_file" in cfg: + single_opts["known_hosts_file"] = cfg["known_hosts_file"] + single_opts["_ssh_version"] = ( + ctx.get("_ssh_version") or salt.client.ssh.ssh_version() + ) + + single_opts["relenv"] = True + return salt.client.ssh.Single( + single_opts, + argv, + resource_id, + thin=_relenv_path(), + fsclient=_file_client(), + host=cfg["host"], + user=cfg.get("user", "root"), + port=cfg.get("port", 22), + passwd=cfg.get("passwd"), + priv=cfg.get("priv"), + priv_passwd=cfg.get("priv_passwd"), + timeout=cfg.get("timeout", 30), + sudo=cfg.get("sudo", False), + tty=cfg.get("tty", False), + identities_only=cfg.get("identities_only", False), + 
ssh_options=cfg.get("ssh_options"), + keepalive=cfg.get("keepalive", True), + keepalive_interval=cfg.get("keepalive_interval", 60), + keepalive_count_max=cfg.get("keepalive_count_max", 3), + thin_dir=_thin_dir(cfg), + ) + + +# --------------------------------------------------------------------------- +# Required resource interface +# --------------------------------------------------------------------------- + + +def init(opts): + """ + Initialize the ``ssh`` resource type for this minion. + + Called once when the resource type is loaded, before any per-resource + operations are dispatched. Reads host configs from the ``ssh`` entry under + the pillar subtree selected by ``resource_pillar_key`` (see + :func:`salt.utils.resources.pillar_resources_tree`), caches them in + ``__context__["ssh_resource"]``, and pre-resolves the SSH binary version + so that :func:`_shell_opts` never has to run a subprocess during a job. + + :param dict opts: The Salt opts dict. + """ + resource_cfg = salt.utils.resources.pillar_resources_tree(opts).get("ssh", {}) + hosts = resource_cfg.get("hosts", {}) + __context__[CONTEXT_KEY] = { # pylint: disable=undefined-variable + "initialized": True, + "hosts": hosts, + "_ssh_version": salt.client.ssh.ssh_version(), + } + + # Cache master opts so sshresource_state can build an FSClient for state + # compilation without creating a new network channel inside a job thread. + # We read the master config from disk (same conf dir as the minion) to get + # the full config with all defaults, rather than the partial dict returned + # by RemoteClient.master_opts() which omits keys like fileserver_backend. + try: + conf_dir = os.path.dirname(opts.get("conf_file", "")) + master_conf = os.path.join(conf_dir, "master") + if os.path.isfile(master_conf): + master_opts = salt.config.master_config(master_conf) + # roots.FSChan expects cachedir; minimal or test master configs may omit it. 
+ master_opts.setdefault("cachedir", opts.get("cachedir", "")) + __context__[CONTEXT_KEY][ + "master_opts" + ] = master_opts # pylint: disable=undefined-variable + log.debug("ssh resource init: loaded master opts from %s", master_conf) + else: + # Fall back to RemoteClient if we can't find the master config on disk. + file_client = salt.fileclient.get_file_client(opts) + master_opts = file_client.master_opts() + if isinstance(master_opts, dict) and master_opts: + master_opts.setdefault("fileserver_backend", ["roots"]) + master_opts.setdefault("cachedir", opts.get("cachedir", "")) + __context__[CONTEXT_KEY][ + "master_opts" + ] = master_opts # pylint: disable=undefined-variable + file_client.destroy() + except Exception as exc: # pylint: disable=broad-except + log.warning("ssh resource init: failed to load master opts: %s", exc) + + log.debug("ssh resource init() called, managing: %s", list(hosts)) + + +def initialized(): + """ + Return ``True`` if :func:`init` has completed successfully. + + :rtype: bool + """ + return __context__.get(CONTEXT_KEY, {}).get( + "initialized", False + ) # pylint: disable=undefined-variable + + +def discover(opts): + """ + Return the list of SSH resource IDs managed by this minion. + + The list is the set of keys under ``hosts`` for the ``ssh`` type under the + configured resource pillar subtree. Adding or removing a + host from that Pillar key and running ``saltutil.refresh_resources`` + updates the Master's Resource Registry without any process restart. + + :param dict opts: The Salt opts dict. + :rtype: list[str] + """ + hosts = ( + salt.utils.resources.pillar_resources_tree(opts).get("ssh", {}).get("hosts", {}) + ) + resource_ids = list(hosts) + log.debug("ssh resource discover() returning: %s", resource_ids) + return resource_ids + + +def grains(): + """ + Return full Salt grains for the current SSH resource. 
+ + Runs ``grains.items`` on the remote host via the salt-thin bundle + (the same mechanism used by ``salt-ssh``), giving us the complete, + accurate grain set rather than a hand-crafted subset. + + Results are cached in ``__context__`` per resource ID. Call + :func:`grains_refresh` to force re-collection. + + :rtype: dict + """ + resource_id = _resource_id() + + ctx = __context__.get(CONTEXT_KEY, {}) # pylint: disable=undefined-variable + cached = ctx.get("grains", {}).get(resource_id) + if cached is not None: + return cached + + cfg = _host_cfg(resource_id) + single = _make_single(resource_id, ["grains.items"]) + stdout, stderr, retcode = single.cmd_block() + + if retcode != 0 or stdout.startswith("ERROR"): + log.warning( + "ssh resource grains: grains.items failed for %s (rc=%d): %s", + resource_id, + retcode, + stderr or stdout, + ) + return { + "resource_type": "ssh", + "resource_id": resource_id, + "host": cfg.get("host", ""), + } + + try: + parsed = salt.utils.json.loads(stdout) + # thin bundle wraps result as {"local": {"jid": "...", "return": {...}}} + data = parsed.get("local", {}).get("return", parsed) + except Exception as exc: # pylint: disable=broad-except + log.warning( + "ssh resource grains: failed to parse output for %s: %s", resource_id, exc + ) + return { + "resource_type": "ssh", + "resource_id": resource_id, + "host": cfg.get("host", ""), + } + + data["resource_type"] = "ssh" + data["resource_id"] = resource_id + + ctx.setdefault("grains", {})[resource_id] = data + return data + + +def grains_refresh(): + """ + Invalidate the grains cache for the current SSH resource and re-collect. + + :rtype: dict + """ + resource_id = _resource_id() + ctx = __context__.get(CONTEXT_KEY, {}) # pylint: disable=undefined-variable + ctx.get("grains", {}).pop(resource_id, None) + return grains() + + +def ping(): + """ + Return ``True`` if the current SSH resource is reachable via SSH. + + Runs ``echo ping`` on the remote host. 
A zero exit code and the + expected output indicate that the SSH connection is healthy. + """ + resource_id = _resource_id() + try: + shell = _make_shell(resource_id, cfg_override={"timeout": 10}) + stdout, _stderr, retcode = shell.exec_cmd("echo ping") + return retcode == 0 and "ping" in stdout + except Exception as exc: # pylint: disable=broad-except + log.warning("ssh resource ping() failed for %s: %s", resource_id, exc) + return False + + +def shutdown(opts): + """ + Tear down the ``ssh`` resource type. + + Called when the minion shuts down or the resource type is unloaded. + Clears shared type-level state from ``__context__``. + + :param dict opts: The Salt opts dict. + """ + log.debug("ssh resource shutdown() called") + __context__.pop(CONTEXT_KEY, None) # pylint: disable=undefined-variable + + +# --------------------------------------------------------------------------- +# Per-resource operations +# --------------------------------------------------------------------------- + + +def cmd_run(cmd, timeout=None): + """ + Execute a shell command on the current SSH resource. + + This is the primary building block for execution modules that target + SSH resources — analogous to ``__proxy__["ssh_sample.cmd"]()`` in the + proxy model. Execution module overrides for the ``ssh`` resource type + delegate their work here. + + Returns a dict with keys: + + * ``stdout`` — standard output from the remote command + * ``stderr`` — standard error from the remote command + * ``retcode`` — exit code (0 on success) + + :param str cmd: The shell command to run on the remote host. + :param int timeout: Optional per-call SSH timeout in seconds. When + provided, overrides the connection-level ``timeout`` for this + call only. + :rtype: dict + + CLI Example (via resource execution module): + + .. 
code-block:: bash + + salt -C 'T@ssh:web-01' ssh_cmd.run 'uptime' + """ + resource_id = _resource_id() + override = {"timeout": timeout} if timeout is not None else None + shell = _make_shell(resource_id, override) + stdout, stderr, retcode = shell.exec_cmd(cmd) + return {"stdout": stdout, "stderr": stderr, "retcode": retcode} diff --git a/salt/state.py b/salt/state.py index f197f8845567..359ec943a327 100644 --- a/salt/state.py +++ b/salt/state.py @@ -120,6 +120,10 @@ "__pub_ret", "__pub_pid", "__pub_tgt_type", + "__pub_resource_targets", + "__pub_minion_is_target", + "__pub_resource_target", + "__pub_resource_job", "__prereq__", "__prerequiring__", "__umask__", diff --git a/salt/states/saltutil.py b/salt/states/saltutil.py index ab951fc992ca..f325e0e5b0b0 100644 --- a/salt/states/saltutil.py +++ b/salt/states/saltutil.py @@ -323,6 +323,20 @@ def sync_tops(name, **kwargs): return _sync_single(name, "tops", **kwargs) +def sync_resources(name, **kwargs): + """ + Performs the same task as saltutil.sync_resources module + See :mod:`saltutil module for full list of options ` + + .. code-block:: yaml + + sync_everything: + saltutil.sync_resources: + - refresh: True + """ + return _sync_single(name, "resources", **kwargs) + + def sync_thorium(name, **kwargs): """ Performs the same task as saltutil.sync_thorium module diff --git a/salt/utils/minions.py b/salt/utils/minions.py index 0d493928445c..a9e0d9401f29 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py @@ -6,6 +6,8 @@ import fnmatch import logging import re +import threading +import time import salt.cache import salt.key @@ -34,10 +36,9 @@ TARGET_REX = re.compile( r"""(?x) ( - (?PG|P|I|J|L|N|S|E|R) # Possible target engines - (?P(?<=G|P|I|J).)? # Optional delimiter for specific engines - @)? # Engine+delimiter are separated by a '@' - # character and are optional for the target + (?PG|P|I|J|L|N|S|E|R|T|M) # Possible target engines + (?P(?<=G|P|I|J).)? # Optional delimiter (G/P/I/J only) + @)? 
# Engine+delimiter separated by '@', optional (?P.+)$""" # The pattern passed to the target engine ) @@ -195,6 +196,136 @@ def nodegroup_comp(nodegroup, nodegroups, skip=None, first_call=True): return ret +# --------------------------------------------------------------------------- +# Resource index — O(1) lookup for T@ targeting and wildcard augmentation +# --------------------------------------------------------------------------- +# +# A single flat dict stored in one cache file replaces the old per-minion +# ``minion_resources/`` files. Each master worker keeps an in-process +# copy that is refreshed from disk at most once every _RESOURCE_INDEX_TTL +# seconds. AESFuncs._register_resources updates the in-process copy AND +# the on-disk file immediately, so the current worker is always consistent. + +_RESOURCE_INDEX_BANK = "resource_index" +_RESOURCE_INDEX_KEY = "index" +_RESOURCE_INDEX_TTL = 5.0 # seconds + +# Functions where resources run inline and their results are merged into the +# managing minion's own response. The operator sees ONE combined block + ONE +# Summary section instead of separate blocks per resource. +_MERGE_RESOURCE_FUNS = frozenset( + { + "state.apply", + "state.highstate", + "state.sls", + "state.sls_id", + "state.single", + } +) + +_resource_index_lock = threading.Lock() +_resource_index: dict = {"by_id": {}, "by_type": {}, "by_minion": {}} +_resource_index_ts: float = 0.0 + + +def _build_resource_index(by_minion): + """ + Build the three-way flat index from a ``{minion_id: {rtype: [rid, ...]}}`` + mapping. 
+ + Returns:: + + { + "by_id": {rid: {"minion": minion_id, "type": rtype}, ...}, + "by_type": {rtype: [rid, ...], ...}, + "by_minion": {minion_id: {rtype: [rid, ...]}, ...}, + } + """ + by_id = {} + by_type = {} + for minion_id, resources in by_minion.items(): + for rtype, rids in resources.items(): + if rtype not in by_type: + by_type[rtype] = [] + for rid in rids: + by_id[rid] = {"minion": minion_id, "type": rtype} + if rid not in by_type[rtype]: + by_type[rtype].append(rid) + return {"by_id": by_id, "by_type": by_type, "by_minion": dict(by_minion)} + + +def _get_resource_index(cache): + """ + Return the in-process resource index, refreshing from disk if the TTL has + expired. Thread-safe via double-checked locking. + """ + global _resource_index, _resource_index_ts # pylint: disable=global-statement + now = time.monotonic() + if now - _resource_index_ts < _RESOURCE_INDEX_TTL: + return _resource_index + with _resource_index_lock: + if now - _resource_index_ts < _RESOURCE_INDEX_TTL: + return _resource_index + try: + loaded = cache.fetch(_RESOURCE_INDEX_BANK, _RESOURCE_INDEX_KEY) or {} + except Exception: # pylint: disable=broad-except + log.error("Failed to load resource index from cache", exc_info=True) + loaded = {} + _resource_index = loaded or {"by_id": {}, "by_type": {}, "by_minion": {}} + _resource_index_ts = now + return _resource_index + + +def _update_resource_index(cache, minion_id, resources): + """ + Surgically update the in-process index for one minion and persist it to + disk. + + Called from ``AESFuncs._register_resources`` so the current worker's index + is immediately consistent after a minion registers without waiting for the + TTL to expire. + + Cost is O(r) where r is the number of resources for *this minion* — the + old per-minion entries are removed individually and the new ones are + inserted directly, without rebuilding the entire index. 
+ """ + global _resource_index, _resource_index_ts # pylint: disable=global-statement + with _resource_index_lock: + by_id = _resource_index.get("by_id", {}) + by_type = _resource_index.get("by_type", {}) + by_minion = _resource_index.get("by_minion", {}) + + # Remove old entries for this minion only. + old = by_minion.pop(minion_id, {}) + for rtype, rids in old.items(): + old_set = set(rids) + for rid in rids: + by_id.pop(rid, None) + if rtype in by_type: + by_type[rtype] = [r for r in by_type[rtype] if r not in old_set] + if not by_type[rtype]: + del by_type[rtype] + + # Insert new entries. + if resources: + by_minion[minion_id] = resources + for rtype, rids in resources.items(): + existing = by_type.setdefault(rtype, []) + existing_set = set(existing) + for rid in rids: + by_id[rid] = {"minion": minion_id, "type": rtype} + if rid not in existing_set: + existing.append(rid) + existing_set.add(rid) + + _resource_index = {"by_id": by_id, "by_type": by_type, "by_minion": by_minion} + try: + cache.store(_RESOURCE_INDEX_BANK, _RESOURCE_INDEX_KEY, _resource_index) + except Exception: # pylint: disable=broad-except + log.error("Failed to persist resource index to cache", exc_info=True) + _resource_index_ts = time.monotonic() + + class CkMinions: """ Used to check what minions should respond from a target @@ -209,6 +340,7 @@ def __init__(self, opts): self.opts = opts self.cache = salt.cache.factory(opts) self.key = salt.key.get_key(opts) + # TODO(resources): self.registry = ResourceRegistry(opts) # TODO: this is actually an *auth* check if self.opts.get("transport", "zeromq") in salt.transport.TRANSPORTS: self.acc = "minions" @@ -526,6 +658,8 @@ def _deferred_minions(): "S": self._check_ipcidr_minions, "E": self._check_pcre_minions, "R": self._all_minions, + "T": self._check_resource_minions, + "M": self._check_managing_minion_minions, } if pillar_exact: ref["I"] = self._check_pillar_exact_minions @@ -722,8 +856,112 @@ def _all_minions(self, expr=None, minions=None): 
return {"minions": minions, "missing": []} + def _check_resource_minions(self, expr, greedy, minions=None): + """ + Return the resource IDs that match the ``T@`` pattern ``expr``. + + ``expr`` is either a bare resource type (``dummy``) or a full SRN + (``dummy:dummy-01``). + + Unlike other ``_check_*_minions`` methods, the returned IDs are + **resource IDs**, not managing-minion IDs. This is intentional: the + CLI uses this list to know which return IDs to expect, and resource + returns are keyed by resource ID (remapped by the master's ``_return`` + handler after the transport security check passes). + + Job delivery is handled separately: the job is published with the + original ``T@`` target expression and ``tgt_type=compound``; managing + minions receive it via broadcast and filter locally with + ``resource_match.match()``. + + Lookups are O(1) dict access against the in-process resource index + (refreshed from a single flat cache file at most once per TTL). + """ + if ":" in expr: + resource_type, resource_id = expr.split(":", 1) + # Treat a trailing colon with no ID (e.g. "dummy:") as a bare-type + # expression so it matches all resources of that type rather than + # returning an invalid empty-string resource ID. + if not resource_id: + resource_id = None + else: + resource_type, resource_id = expr, None + + index = _get_resource_index(self.cache) + + if resource_id is not None: + # Full SRN: O(1) lookup by resource ID. + if resource_id in index["by_id"]: + return {"minions": [resource_id], "missing": []} + log.debug( + "T@%s not in resource index; using resource ID from expression", + expr, + ) + return {"minions": [resource_id], "missing": []} + + # Bare type: O(1) lookup by type. + rids = index["by_type"].get(resource_type) + if rids: + return {"minions": list(rids), "missing": []} + + log.warning( + "T@%s: resource registry has no entries of this type. 
" + "Restart or sync_all the managing minion to populate the registry.", + expr, + ) + return {"minions": [], "missing": []} + + def _augment_with_resources(self, minion_ids): + """ + Append the resource IDs managed by each matched minion to the list. + + Called by :meth:`check_minions` for wildcard glob targets so that + ``salt '*' test.ping`` also includes returns from managed resources. + + Lookups are O(1) dict access per minion against the in-process + resource index. If the index is unavailable the method logs an error + and returns ``minion_ids`` unchanged so ordinary targeting is never + disrupted by a resource-cache failure. + """ + try: + index = _get_resource_index(self.cache) + except Exception: # pylint: disable=broad-except + log.error( + "Failed to load resource index; resource IDs will not be " + "included in this target expansion.", + exc_info=True, + ) + return list(minion_ids) + by_minion = index.get("by_minion", {}) + if not by_minion: + return list(minion_ids) + result = list(minion_ids) + seen = set(result) + for minion_id in minion_ids: + resources = by_minion.get(minion_id, {}) + for rids in resources.values(): + for rid in rids: + if rid not in seen: + result.append(rid) + seen.add(rid) + return result + + def _check_managing_minion_minions(self, expr, greedy, minions=None): + """ + Return the minion set for a ``M@`` managing-minion expression. + + ``expr`` is a minion ID glob. Returns any accepted minion whose ID + matches ``expr``. + """ + return self._check_glob_minions(expr, greedy, minions=minions) + def check_minions( - self, expr, tgt_type="glob", delimiter=DEFAULT_TARGET_DELIM, greedy=True + self, + expr, + tgt_type="glob", + delimiter=DEFAULT_TARGET_DELIM, + greedy=True, + fun=None, ): """ Check the passed regex against the available minions' public keys @@ -749,6 +987,23 @@ def check_minions( # pylint: enable=not-callable else: _res = check_func(expr, greedy) # pylint: disable=not-callable + # For wildcard glob targets (e.g. 
``salt '*'``), include resource + # IDs managed by matched minions so that the master keeps its + # response window open long enough to receive resource results. + # Specific name targets (e.g. ``salt 'minion'``) are intentionally + # NOT augmented — targeting a minion by name should not implicitly + # include its resources. + # Compound targets handle resource matching explicitly via T@/M@. + # Merge-mode functions (state.apply etc.) run resources inline on + # the managing minion and return ONE combined response, so the + # master must NOT add separate resource IDs to its wait-list. + if ( + tgt_type == "glob" + and isinstance(expr, str) + and any(c in expr for c in ("*", "?", "[")) + and not (isinstance(fun, str) and fun in _MERGE_RESOURCE_FUNS) + ): + _res["minions"] = self._augment_with_resources(_res["minions"]) _res["ssh_minions"] = False if self.opts.get("enable_ssh_minions", False) is True and isinstance( "tgt", str diff --git a/salt/utils/relenv.py b/salt/utils/relenv.py index 4f2c4878d8fe..b2be0399167e 100644 --- a/salt/utils/relenv.py +++ b/salt/utils/relenv.py @@ -41,7 +41,11 @@ def gen_relenv( tarball_path = os.path.join(relenv_dir, "salt-relenv.tar.xz") - # Download the tarball if it doesn't exist or overwrite is True + # Download the tarball if it doesn't exist or overwrite is True. + # NOTE: get_tarball() always makes network requests to scrape the latest + # version number even when the tarball is already cached. Skip it when + # the file is present and overwrite is not requested — this avoids + # unnecessary latency and failures in air-gapped environments. if overwrite or not os.path.exists(tarball_path): # Check for shared test cache first (for integration tests) import shutil diff --git a/salt/utils/resource_registry.py b/salt/utils/resource_registry.py new file mode 100644 index 000000000000..83972d232fb2 --- /dev/null +++ b/salt/utils/resource_registry.py @@ -0,0 +1,184 @@ +""" +Resource Registry — the system of record for Salt Resources. 
+ +The registry tracks which minions manage each resource and is the backing +store that the targeting layer queries when resolving ``T@`` and ``M@`` +expressions. + +**Minions are resources.** A traditional Salt minion is a resource of type +``minion`` with SRN ``minion:``. All resources are stored and +queried uniformly through this registry. + +Cache layout +------------ +Resources are stored across three banks in Salt's pluggable cache +(``salt.cache.factory(opts)``). All banks use the **bare resource ID** as the +key. IDs are globally unique across all types and all minions, so the type is +never part of the cache key:: + + bank: "grains", key: "" → {grain_dict} + bank: "pillar", key: "" → {pillar_dict} + bank: "resources", key: "" → {"type": "...", "managing_minions": [...]} + +The ``grains`` and ``pillar`` banks are unchanged from today — existing minion +entries require no migration. + +The ``resources`` bank is new. It is the topology store: it records the +resource type and which minions manage each resource. This data is externally +defined (by RAAS or the operator) and is never self-reported by the resource. + +A resource may be managed by more than one minion. ``managing_minions`` is +always a list. For a minion-type resource it contains the minion's own ID. + +Resources enter the registry in two ways: + +* **Defined in RAAS** — created directly in the enterprise control plane. +* **Reported by minions** — minions discover resources and push them to the + Master via ``saltutil.refresh_resources``, analogous to grain reporting. + +This module provides the interface consumed by the targeting layer. +Registry population (registration and discovery) is handled elsewhere. +""" + +import logging + +# import salt.cache # TODO(resources): uncomment when ResourceRegistry is implemented + +log = logging.getLogger(__name__) + +RESOURCE_BANK = "resources" + + +def parse_srn(expression): + """ + Parse a ``T@`` pattern into its ``type`` and ``id`` components. 
+ + A full Salt Resource Name (SRN) has the form ``:``. A bare + expression contains only a type with no colon. The cache never sees the + full SRN — this function is used only by the targeting layer when parsing + user-supplied expressions. + + Returns a dict with keys: + + * ``type`` — the resource type string (e.g. ``"vcf_host"``). + * ``id`` — the bare resource ID string, or ``None`` for a bare type. + + Examples:: + + parse_srn("vcf_host") # {"type": "vcf_host", "id": None} + parse_srn("vcf_host:esxi-01") # {"type": "vcf_host", "id": "esxi-01"} + + :param str expression: A bare resource type or a full SRN. + :rtype: dict + """ + + +class ResourceRegistry: + """ + Master-side interface to the Salt Resource Registry backed by ``salt.cache``. + + Instantiate with the Salt opts dict; the class opens its own cache handle + via ``salt.cache.factory(opts)`` so callers do not manage the cache + directly. The cache backend (localfs, redis, etc.) is determined by + ``opts["cache"]``, exactly as it is for grains and pillar caching in + ``CkMinions``:: + + registry = ResourceRegistry(opts) + registry.get_managing_minions_by_type("vcf_host") + + This class is a master-side construct. Minions do not query the registry + cache — they read resource information from ``opts["resources"]``, which is + populated by the resource module loader at startup, analogous to how + ``opts["grains"]`` is populated by the grain loader. + """ + + def __init__(self, opts): + """ + Initialise the registry and open a handle to the Salt cache. + + :param dict opts: The Salt opts dict. 
+ """ + + # ------------------------------------------------------------------ + # Read interface — used by the targeting layer + # ------------------------------------------------------------------ + + def get_resource(self, resource_id): + """ + Return the topology blob for a single resource from the ``resources`` + bank, or ``None`` if the resource is not registered:: + + cache.fetch("resources", resource_id) + + The blob contains at minimum ``type`` and ``managing_minions`` keys. + + :param str resource_id: The bare resource ID (e.g. ``"esxi-01"``). + :rtype: dict or None + """ + + def get_managing_minions_by_type(self, resource_type): + """ + Return the set of minion IDs that manage at least one resource of + ``resource_type``. + + Used by ``CkMinions._check_resource_minions`` to resolve ``T@`` + expressions. Iterates all entries in the ``resources`` bank, filters + by type, and returns the union of all ``managing_minions`` lists. + + The return value mirrors the ``{"minions": [...], "missing": []}`` + shape used throughout ``CkMinions``. + + :param str resource_type: The resource type to query (e.g. + ``"vcf_host"``). + :rtype: dict + """ + + def get_managing_minions_for_id(self, resource_id): + """ + Return the list of minion IDs that manage the resource identified by + ``resource_id``, or an empty list if the resource is not registered. + + Used by ``CkMinions._check_resource_minions`` to resolve + ``T@:`` expressions. + + :param str resource_id: The bare resource ID (e.g. ``"esxi-01"``). + :rtype: list[str] + """ + + def get_resources_for_minion(self, minion_id): + """ + Return the list of resource IDs managed by ``minion_id``. + + Used by ``CkMinions._check_resource_minions`` and + ``CkMinions._check_managing_minion_minions`` to enumerate the resources + a given minion owns when resolving compound expressions on the master. + + :param str minion_id: The minion whose resources are requested. 
+ :rtype: list[str] + """ + + def has_resource_type(self, minion_id, resource_type): + """ + Return ``True`` if ``minion_id`` manages at least one resource of + ``resource_type``. + + Used by master-side compound expression evaluation to verify ownership + when intersecting ``M@`` and ``T@`` expressions. + + :param str minion_id: The minion to check. + :param str resource_type: The resource type to test for. + :rtype: bool + """ + + def has_resource(self, minion_id, resource_id): + """ + Return ``True`` if ``minion_id`` manages the resource identified by + ``resource_id``. + + Used by master-side compound expression evaluation to verify ownership + when intersecting ``M@`` and ``T@`` expressions. + + :param str minion_id: The minion to check. + :param str resource_id: The bare resource ID (e.g. ``"esxi-01"``). + :rtype: bool + """ diff --git a/salt/utils/resources.py b/salt/utils/resources.py new file mode 100644 index 000000000000..1583aa6a83f1 --- /dev/null +++ b/salt/utils/resources.py @@ -0,0 +1,39 @@ +""" +Helpers for Salt resource minions: configurable pillar key and lookups. +""" + +import logging + +log = logging.getLogger(__name__) + + +def resource_pillar_key(opts): + """ + Return the top-level pillar key used for per-type resource configuration. + + Configured by minion option ``resource_pillar_key`` (default ``resources``). + Empty values are rejected with a warning and treated as ``"resources"``. + """ + key = opts.get("resource_pillar_key", "resources") + if not key: + log.warning( + "resource_pillar_key is empty; using default 'resources'. " + "Set resource_pillar_key to a non-empty string in the minion config." + ) + key = "resources" + return key + + +def pillar_resources_tree(opts): + """ + Return the merged pillar mapping under the configured resource pillar key. + + If the key is absent, returns ``{}`` (same as an empty declaration). + Non-dict values are treated as empty. 
+ """ + key = resource_pillar_key(opts) + pillar = opts.get("pillar", {}) + if key not in pillar: + return {} + pr = pillar.get(key) + return pr if isinstance(pr, dict) else {} diff --git a/tests/conftest.py b/tests/conftest.py index 807ee5118858..498529340cd1 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -36,6 +36,7 @@ from tests.support.pytest.helpers import * # pylint: disable=unused-wildcard-import,wildcard-import from tests.support.runtests import RUNTIME_VARS from tests.support.sminion import check_required_sminion_attributes, create_sminion +from tests.support.sshd_runtime import ensure_sshd_privilege_separation_directories TESTS_DIR = pathlib.Path(__file__).resolve().parent PYTESTS_DIR = TESTS_DIR / "pytests" @@ -1412,6 +1413,7 @@ def sshd_server(salt_factories, sshd_config_dir, salt_master, grains): sshd_config_dict=sshd_config_dict, config_dir=sshd_config_dir, ) + ensure_sshd_privilege_separation_directories(factory.config_dir / "sshd_config") with factory.started(): yield factory diff --git a/tests/pytests/integration/modules/saltutil/test_modules.py b/tests/pytests/integration/modules/saltutil/test_modules.py index d35cb735f2e0..cba8eec6d9c1 100644 --- a/tests/pytests/integration/modules/saltutil/test_modules.py +++ b/tests/pytests/integration/modules/saltutil/test_modules.py @@ -48,6 +48,7 @@ def test_sync_all(salt_call_cli): "renderers": [], "log_handlers": [], "matchers": [], + "resources": [], "states": [], "sdb": [], "proxymodules": [], @@ -78,6 +79,7 @@ def test_sync_all_whitelist(salt_call_cli): "renderers": [], "log_handlers": [], "matchers": [], + "resources": [], "states": [], "sdb": [], "proxymodules": [], @@ -114,6 +116,7 @@ def test_sync_all_blacklist(salt_call_cli): "renderers": [], "log_handlers": [], "matchers": [], + "resources": [], "states": [], "sdb": [], "proxymodules": [], @@ -154,6 +157,7 @@ def test_sync_all_blacklist_and_whitelist(salt_call_cli): "renderers": [], "log_handlers": [], "matchers": [], + "resources": [], 
"states": [], "sdb": [], "proxymodules": [], diff --git a/tests/pytests/integration/resources/__init__.py b/tests/pytests/integration/resources/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/pytests/integration/resources/conftest.py b/tests/pytests/integration/resources/conftest.py new file mode 100644 index 000000000000..2b59d68f813f --- /dev/null +++ b/tests/pytests/integration/resources/conftest.py @@ -0,0 +1,115 @@ +""" +Integration test fixtures for Salt Resources. + +Spins up a master and a minion whose dummy resources (dummy-01, dummy-02) are +declared only in Pillar under ``resources:`` — not in the minion config file. +All tests in this package run against these two daemons. +""" + +import textwrap +import time + +import pytest + +from tests.conftest import FIPS_TESTRUN + +MINION_ID = "resources-minion" + +# Dummy resource IDs that the minion manages in every test in this package. +DUMMY_RESOURCES = ["dummy-01", "dummy-02"] + + +@pytest.fixture(scope="package") +def pillar_tree_dummy_resources(salt_master): + """ + Pillar declaring ``resources.dummy.resource_ids`` for the test minion. + + Resource discovery reads this tree via ``pillar_resources_tree``; the minion + must not rely on a static ``resources:`` key in minion opts. 
+ """ + top_file = textwrap.dedent( + f""" + base: + '{MINION_ID}': + - dummy_resources + """ + ) + pillar_sls = textwrap.dedent( + """ + resources: + dummy: + resource_ids: + - dummy-01 + - dummy-02 + """ + ) + top_tempfile = salt_master.pillar_tree.base.temp_file("top.sls", top_file) + sls_tempfile = salt_master.pillar_tree.base.temp_file( + "dummy_resources.sls", pillar_sls + ) + with top_tempfile, sls_tempfile: + yield + + +@pytest.fixture(scope="package") +def salt_master(request, salt_factories): + config_overrides = { + "interface": "127.0.0.1", + "transport": request.config.getoption("--transport"), + "fips_mode": FIPS_TESTRUN, + "publish_signing_algorithm": ( + "PKCS1v15-SHA224" if FIPS_TESTRUN else "PKCS1v15-SHA1" + ), + } + factory = salt_factories.salt_master_daemon( + "resources-master", + overrides=config_overrides, + extra_cli_arguments_after_first_start_failure=["--log-level=info"], + ) + with factory.started(start_timeout=120): + yield factory + + +@pytest.fixture(scope="package") +def salt_minion(salt_master, pillar_tree_dummy_resources): + config_overrides = { + "fips_mode": FIPS_TESTRUN, + "encryption_algorithm": "OAEP-SHA224" if FIPS_TESTRUN else "OAEP-SHA1", + "signing_algorithm": "PKCS1v15-SHA224" if FIPS_TESTRUN else "PKCS1v15-SHA1", + # Use threads (not processes) — this is the path our Race 1/Race 2 fixes + # target and the most common deployment mode for resource-managing minions. 
+ "multiprocessing": False, + } + factory = salt_master.salt_minion_daemon( + MINION_ID, + overrides=config_overrides, + extra_cli_arguments_after_first_start_failure=["--log-level=info"], + ) + factory.after_terminate( + pytest.helpers.remove_stale_minion_key, salt_master, factory.id + ) + with factory.started(start_timeout=120): + salt_call_cli = factory.salt_call_cli() + ret = salt_call_cli.run("saltutil.refresh_pillar", wait=True, _timeout=120) + assert ret.returncode == 0, ret + assert ret.data is True, ret + ret = salt_call_cli.run("saltutil.sync_all", _timeout=120) + assert ret.returncode == 0, ret + # The minion fires _register_resources_with_master() as a background + # task on connect. Waiting briefly ensures the master cache is + # populated before tests run (typically completes in < 1 s, but the + # sync_all above already takes several seconds so this is a safety net). + time.sleep(3) + yield factory + + +@pytest.fixture(scope="package") +def salt_cli(salt_master): + assert salt_master.is_running() + return salt_master.salt_cli(timeout=60) + + +@pytest.fixture(scope="package") +def salt_call_cli(salt_minion): + assert salt_minion.is_running() + return salt_minion.salt_call_cli(timeout=60) diff --git a/tests/pytests/integration/resources/test_dummy_resource.py b/tests/pytests/integration/resources/test_dummy_resource.py new file mode 100644 index 000000000000..4bd760482834 --- /dev/null +++ b/tests/pytests/integration/resources/test_dummy_resource.py @@ -0,0 +1,128 @@ +""" +End-to-end integration tests for Salt Resources using the dummy resource type. + +These tests verify the full dispatch pipeline: + + salt CLI → master targeting (CkMinions) → minion (_resolve_resource_targets) + → resource loader → return → master re-key → CLI response + +The minion under test loads dummy resources from Pillar only (``resources.dummy`` +with ``resource_ids``) and uses ``multiprocessing: False``. 
+ +The ``dummy`` resource module (``salt/resource/dummy.py``) and its execution +module (``salt/modules/dummyresource_test.py``) are pure-Python in-process +implementations that require no external services. +""" + +import pytest + +from tests.pytests.integration.resources.conftest import DUMMY_RESOURCES + +pytestmark = [pytest.mark.slow_test] + + +def test_minion_has_resources_configured(salt_minion, salt_call_cli): + """Sanity check: the minion must report its resource config before other tests run.""" + ret = salt_call_cli.run("config.get", "resources") + assert ret.returncode == 0, ret + data = ret.data + assert isinstance(data, dict), f"Expected dict, got: {data!r}" + assert "dummy" in data, f"'dummy' missing from config.get resources: {data}" + assert set(data["dummy"]) == { + "dummy-01", + "dummy-02", + }, f"Unexpected resource IDs: {data['dummy']}" + + +def test_glob_wildcard_returns_minion_and_resources(salt_minion, salt_cli): + """ + ``salt '*' test.ping`` must return ``True`` for the managing minion *and* + for every resource it manages. + + This exercises the full pipeline: + - Master ``_augment_with_resources`` adds dummy-01/dummy-02 to the + expected-minion set so the response window stays open. + - Minion ``_resolve_resource_targets`` dispatches two resource jobs. + - Each resource job returns via ``_thread_return`` with ``resource_id``. + - Master ``_return`` remaps ``resource_id`` → ``id`` before delivering. + """ + ret = salt_cli.run("test.ping", minion_tgt="*") + assert ret.returncode == 0, ret + + data = ret.data + assert isinstance(data, dict), f"Expected dict, got: {data!r}" + + # The managing minion must respond. + assert ( + salt_minion.id in data + ), f"Managing minion '{salt_minion.id}' not in response: {list(data)}" + assert data[salt_minion.id] is True + + # Every configured resource must also respond. 
+ for rid in DUMMY_RESOURCES: + assert rid in data, f"Resource '{rid}' missing from response: {list(data)}" + assert data[rid] is True, f"Resource '{rid}' returned non-True: {data[rid]}" + + +def test_T_at_full_srn_returns_only_that_resource(salt_minion, salt_cli): + """ + ``salt -C 'T@dummy:dummy-01' test.ping`` must return a response keyed to + ``dummy-01`` only — not to the managing minion or to dummy-02. + + This exercises the compound-match targeting path: + - Master ``_check_resource_minions`` resolves ``T@dummy:dummy-01`` to the + single resource ID ``dummy-01`` and the managing minion as the delivery + target. + - Minion ``_resolve_resource_targets`` (compound path) dispatches only to + dummy-01 because the T@ term matches exactly one resource. + """ + ret = salt_cli.run("--compound", "test.ping", minion_tgt="T@dummy:dummy-01") + assert ret.returncode == 0, ret + + data = ret.data + assert isinstance(data, dict), f"Expected dict, got: {data!r}" + assert data == { + "dummy-01": True + }, f"Expected only dummy-01 in response, got: {data}" + + +def test_T_at_bare_type_returns_all_resources_of_type(salt_minion, salt_cli): + """ + ``salt -C 'T@dummy' test.ping`` must return ``True`` for every dummy + resource — dummy-01 and dummy-02 — without including the managing minion. + """ + ret = salt_cli.run("--compound", "test.ping", minion_tgt="T@dummy") + assert ret.returncode == 0, ret + + data = ret.data + assert isinstance(data, dict), f"Expected dict, got: {data!r}" + + for rid in DUMMY_RESOURCES: + assert ( + rid in data + ), f"Resource '{rid}' missing from T@dummy response: {list(data)}" + assert data[rid] is True + + # The managing minion should NOT be in the T@-only response. 
+ assert ( + salt_minion.id not in data + ), "Managing minion unexpectedly included in T@dummy response" + + +def test_unknown_resource_function_fails_loudly(salt_minion, salt_cli): + """ + Calling a function that does not exist on a resource must return an error + string, not silently fall through to the managing minion's own module. + + This guards against the pre-resource behaviour where an unknown function + for a resource target would execute on the minion itself. + """ + ret = salt_cli.run( + "--compound", "nonexistent.function", minion_tgt="T@dummy:dummy-01" + ) + # The command fails (non-zero) because the function is unknown. + assert ret.returncode != 0 or ( + isinstance(ret.data, dict) + and isinstance(ret.data.get("dummy-01"), str) + and "nonexistent" in ret.data["dummy-01"].lower() + ), f"Expected error for unknown resource function, got: {ret.data!r}" diff --git a/tests/pytests/integration/resources_ssh/__init__.py b/tests/pytests/integration/resources_ssh/__init__.py new file mode 100644 index 000000000000..4be7ae16bed3 --- /dev/null +++ b/tests/pytests/integration/resources_ssh/__init__.py @@ -0,0 +1 @@ +# Integration tests for SSH Salt Resources (minion-side salt-ssh / relenv path). diff --git a/tests/pytests/integration/resources_ssh/conftest.py b/tests/pytests/integration/resources_ssh/conftest.py new file mode 100644 index 000000000000..6beb5722f813 --- /dev/null +++ b/tests/pytests/integration/resources_ssh/conftest.py @@ -0,0 +1,193 @@ +""" +Fixtures for SSH resource integration tests. + +Spins up the shared session master, a module-scoped sshd, Pillar declaring one +SSH resource (``ssh-int-01``) pointing at that sshd, and a minion that manages +it. This exercises :mod:`salt.resource.ssh` — including ``Single`` built from +minion opts, relenv, and ``cmd_block()`` — which plain salt-ssh integration +tests never touch (they run on the master only). 
+""" + +from __future__ import annotations + +import glob +import logging +import os +import pathlib +import platform +import shutil +import tempfile +import time + +import pytest + +# sshd usually lives in /usr/sbin, which is not always on a non-login PATH. +for _bindir in ("/usr/sbin", "/usr/local/sbin"): + if os.path.isdir(_bindir) and _bindir not in os.environ.get("PATH", ""): + os.environ["PATH"] = _bindir + os.pathsep + os.environ.get("PATH", "") + +import salt.utils.relenv +from tests.conftest import FIPS_TESTRUN +from tests.support.runtests import RUNTIME_VARS + +log = logging.getLogger(__name__) + +SSH_RESOURCE_ID = "ssh-int-01" +MINION_ID = "ssh-resources-minion" + + +def _detect_kernel_and_arch(): + kernel = platform.system().lower() + if kernel == "darwin": + kernel = "darwin" + elif kernel == "windows": + kernel = "windows" + else: + kernel = "linux" + + machine = platform.machine().lower() + if machine in ("amd64", "x86_64"): + os_arch = "x86_64" + elif machine in ("aarch64", "arm64"): + os_arch = "arm64" + else: + os_arch = machine + return kernel, os_arch + + +@pytest.fixture(scope="session") +def relenv_tarball_for_ssh_resource(): + """ + Pre-resolve a relenv tarball path for populating the minion cache. + + ``salt.resource.ssh._relenv_path`` looks under + ``/relenv/linux//salt-relenv.tar.xz`` for ``x86_64`` or ``arm64``. 
+ """ + shared_cache = os.path.join(tempfile.gettempdir(), "salt_ssh_resource_int_relenv") + os.makedirs(shared_cache, exist_ok=True) + kernel, os_arch = _detect_kernel_and_arch() + + artifacts_glob = str( + pathlib.Path("/salt/artifacts").joinpath( + f"salt-*-onedir-{kernel}-{os_arch}.tar.xz" + ) + ) + for path in glob.glob(artifacts_glob): + if os.path.isfile(path): + log.info("Using CI artifact relenv tarball: %s", path) + return path + + try: + path = salt.utils.relenv.gen_relenv( + shared_cache, kernel=kernel, os_arch=os_arch + ) + if path and os.path.isfile(path): + log.info("Relenv tarball for SSH resource tests: %s", path) + return path + except (OSError, ValueError) as exc: + log.warning("Could not build/download relenv tarball: %s", exc) + return None + + +@pytest.fixture(scope="module") +def pillar_tree_ssh_resources( + salt_master, sshd_server, sshd_config_dir, known_hosts_file +): + """ + Pillar declaring ``resources.ssh.hosts`` for ``ssh-int-01`` → local sshd. + """ + port = sshd_server.listen_port + user = RUNTIME_VARS.RUNNING_TESTS_USER + priv = str(sshd_config_dir / "client_key") + + top_file = f""" + base: + '{MINION_ID}': + - ssh_resources_int + """ + + # Host blocks mirror roster-style auth; ignore_host_keys keeps the test simple. 
+ ssh_pillar = f""" + resources: + ssh: + hosts: + {SSH_RESOURCE_ID}: + host: 127.0.0.1 + port: {port} + user: {user} + priv: {priv} + ignore_host_keys: true + timeout: 180 + """ + + top_tempfile = salt_master.pillar_tree.base.temp_file("top.sls", top_file) + pillar_tempfile = salt_master.pillar_tree.base.temp_file( + "ssh_resources_int.sls", ssh_pillar + ) + with top_tempfile, pillar_tempfile: + yield + + +@pytest.fixture(scope="module") +def salt_minion_ssh_resources( + salt_master, + pillar_tree_ssh_resources, + relenv_tarball_for_ssh_resource, +): + assert salt_master.is_running() + + config_overrides = { + "fips_mode": FIPS_TESTRUN, + "encryption_algorithm": "OAEP-SHA224" if FIPS_TESTRUN else "OAEP-SHA1", + "signing_algorithm": "PKCS1v15-SHA224" if FIPS_TESTRUN else "PKCS1v15-SHA1", + # Match resources/dummy integration: thread pool, resource race coverage. + "multiprocessing": False, + } + + factory = salt_master.salt_minion_daemon( + MINION_ID, + overrides=config_overrides, + extra_cli_arguments_after_first_start_failure=["--log-level=info"], + ) + factory.after_terminate( + pytest.helpers.remove_stale_minion_key, salt_master, factory.id + ) + + with factory.started(start_timeout=120): + cachedir = factory.config["cachedir"] + _kernel, os_arch = _detect_kernel_and_arch() + relenv_subdir = os.path.join(cachedir, "relenv", "linux", os_arch) + os.makedirs(relenv_subdir, exist_ok=True) + dest = os.path.join(relenv_subdir, "salt-relenv.tar.xz") + if relenv_tarball_for_ssh_resource and os.path.isfile( + relenv_tarball_for_ssh_resource + ): + shutil.copyfile(relenv_tarball_for_ssh_resource, dest) + log.info("Installed relenv tarball for minion at %s", dest) + else: + log.warning( + "No relenv tarball available — SSH resource tests that need relenv may fail" + ) + + salt_call_cli = factory.salt_call_cli() + ret = salt_call_cli.run("saltutil.refresh_pillar", wait=True, _timeout=120) + assert ret.returncode == 0, ret + assert ret.data is True, ret + + ret = 
salt_call_cli.run("saltutil.sync_all", _timeout=120) + assert ret.returncode == 0, ret + + time.sleep(3) + yield factory + + +@pytest.fixture(scope="module") +def salt_call_ssh_resource(salt_minion_ssh_resources): + assert salt_minion_ssh_resources.is_running() + return salt_minion_ssh_resources.salt_call_cli(timeout=120) + + +@pytest.fixture(scope="module") +def salt_cli_ssh_resource(salt_master): + assert salt_master.is_running() + return salt_master.salt_cli(timeout=120) diff --git a/tests/pytests/integration/resources_ssh/test_ssh_resource_integration.py b/tests/pytests/integration/resources_ssh/test_ssh_resource_integration.py new file mode 100644 index 000000000000..7b1b0e942727 --- /dev/null +++ b/tests/pytests/integration/resources_ssh/test_ssh_resource_integration.py @@ -0,0 +1,61 @@ +""" +End-to-end integration tests for the SSH resource type (``salt/resource/ssh.py``). + +Unlike :mod:`tests.pytests.integration.ssh`, which runs **salt-ssh on the master**, +these tests run the **managing minion** path: master publishes to ``T@ssh:…``, +the minion dispatches into the SSH resource loader, which builds +:class:`~salt.client.ssh.Single` from **minion** ``__opts__`` and runs +``cmd_block()`` — the code path that broke with missing ``ext_pillar`` / ``fsclient``. + +Requirements: + +* ``--ssh-tests`` (transports sshd + roster fixtures; see ``requires_sshd_server``). +* A relenv tarball available (CI artifact or downloaded via + :func:`salt.utils.relenv.gen_relenv`), copied into the minion cache by + ``conftest.py``. 
+""" + +import pytest + +from tests.pytests.integration.resources_ssh.conftest import SSH_RESOURCE_ID + +pytestmark = [ + pytest.mark.slow_test, + pytest.mark.requires_sshd_server, + pytest.mark.skip_on_windows(reason="SSH resource integration uses Unix sshd"), +] + + +def test_minion_pillar_lists_ssh_resource( + salt_minion_ssh_resources, salt_call_ssh_resource +): + """Pillar must expose ``resources.ssh.hosts`` for the SSH resource ID.""" + ret = salt_call_ssh_resource.run("pillar.get", "resources:ssh:hosts", _timeout=120) + assert ret.returncode == 0, ret + hosts = ret.data + assert isinstance(hosts, dict), hosts + assert SSH_RESOURCE_ID in hosts, f"missing {SSH_RESOURCE_ID!r}, got {list(hosts)}" + assert hosts[SSH_RESOURCE_ID].get("host") == "127.0.0.1" + + +def test_ssh_resource_T_at_test_ping( + salt_minion_ssh_resources, salt_cli_ssh_resource, relenv_tarball_for_ssh_resource +): + """ + ``salt --compound T@ssh:… test.ping`` runs ``sshresource_test.ping`` → + :func:`salt.resource.ssh.ping` (shell to the SSH resource). The minion + preloads ``ssh.grains`` for ``__grains__`` in the resource loader; that path + must have a usable FSClient (``master_opts`` including ``cachedir``). 
+ """ + if not relenv_tarball_for_ssh_resource: + pytest.skip("No relenv tarball — cannot run SSH resource against relenv bundle") + + ret = salt_cli_ssh_resource.run( + "--compound", + "test.ping", + minion_tgt=f"T@ssh:{SSH_RESOURCE_ID}", + ) + assert ret.returncode == 0, ret + data = ret.data + assert isinstance(data, dict), data + assert data.get(SSH_RESOURCE_ID) is True, data diff --git a/tests/pytests/unit/client/ssh/test_ssh_classes.py b/tests/pytests/unit/client/ssh/test_ssh_classes.py index cabd4ff17224..34c48e004521 100644 --- a/tests/pytests/unit/client/ssh/test_ssh_classes.py +++ b/tests/pytests/unit/client/ssh/test_ssh_classes.py @@ -7,6 +7,22 @@ from salt.exceptions import SaltClientError, SaltSystemExit from tests.support.mock import MagicMock, patch +# Minimal opts that look like a Salt minion config. +# Intentionally omits ``ext_pillar`` — it is a master-only key +# (DEFAULT_MASTER_OPTS) and is absent from DEFAULT_MINION_OPTS. +# salt-ssh's Single.__init__ must not crash when constructed from +# minion opts (as it is when invoked by the SSH resource driver). +_MINION_OPTS = { + "relenv": True, + "cachedir": "/tmp", + "thin_dir": "/tmp/_salt_relenv_test", + "ssh_wipe": False, + "file_roots": {"base": ["/srv/salt"]}, + "pillar_roots": {"base": ["/srv/pillar"]}, + "module_dirs": [], + # ext_pillar is deliberately absent +} + pytestmark = [pytest.mark.skip_unless_on_linux(reason="Test ssh only run on Linux")] @@ -80,3 +96,33 @@ def test_ssh_class(): "salt-ssh could not be run because it could not generate keys." in err.value ) + + +def test_single_init_with_minion_opts_no_ext_pillar(): + """ + Single.__init__ must succeed when given minion opts that lack ``ext_pillar``. + + salt-ssh normally runs on the master, where opts always contain + ``ext_pillar: []`` (it is in DEFAULT_MASTER_OPTS). The SSH resource + driver builds Single from inside a minion process using ``dict(__opts__)``, + which produces minion opts. 
``ext_pillar`` is absent from + DEFAULT_MINION_OPTS, so a direct ``opts["ext_pillar"]`` access raises + KeyError. The fix uses ``opts.get("ext_pillar", [])``; this test pins + that behaviour so the regression is immediately obvious if the .get() is + ever reverted. + """ + with patch("salt.loader.ssh_wrapper", return_value=MagicMock()), patch( + "salt.client.ssh.shell.gen_shell", return_value=MagicMock() + ): + single = dunder_ssh.Single( + _MINION_OPTS.copy(), + "test.ping", + "target-host", + host="192.0.2.1", + thin="/fake/salt-relenv.tar.xz", + thin_dir="/tmp/_salt_relenv_test", + ) + + assert ( + single.minion_opts["ext_pillar"] == [] + ), "ext_pillar should default to [] when absent from minion opts" diff --git a/tests/pytests/unit/client/test_netapi.py b/tests/pytests/unit/client/test_netapi.py index 2c99924c4074..67053bfffa97 100644 --- a/tests/pytests/unit/client/test_netapi.py +++ b/tests/pytests/unit/client/test_netapi.py @@ -1,7 +1,7 @@ import logging import salt.client.netapi -from tests.support.mock import Mock, patch +from tests.support.mock import AsyncMock, Mock, patch def test_run_log(caplog, master_opts): @@ -11,6 +11,7 @@ def test_run_log(caplog, master_opts): master_opts["rest_cherrypy"] = {"port": 8000} mock_process = Mock() mock_process.add_process.return_value = True + mock_process.return_value.run = AsyncMock() patch_process = patch.object(salt.utils.process, "ProcessManager", mock_process) with caplog.at_level(logging.INFO): with patch_process: diff --git a/tests/pytests/unit/loader/test_context.py b/tests/pytests/unit/loader/test_context.py index 64b36411f4b5..c720cc4c44ea 100644 --- a/tests/pytests/unit/loader/test_context.py +++ b/tests/pytests/unit/loader/test_context.py @@ -2,10 +2,14 @@ Tests for salt.loader.context """ +import contextvars import copy +import threading +import salt.loader import salt.loader.context import salt.loader.lazy +from tests.support.mock import patch def test_named_loader_context(): @@ -55,3 +59,134 @@ def 
test_named_loader_context_opts(): with salt.loader.context.loader_context(loader): assert "foo" in opts assert opts["foo"] == "bar" + + +# --------------------------------------------------------------------------- +# resource_ctxvar tests +# --------------------------------------------------------------------------- + + +def test_resource_ctxvar_default_is_empty_dict(): + """resource_ctxvar returns {} when nothing has been set in this context.""" + assert salt.loader.context.resource_ctxvar.get() == {} + + +def test_resource_ctxvar_set_and_get(): + """Setting resource_ctxvar is visible within the same thread.""" + target = {"id": "dummy-01", "type": "dummy"} + tok = salt.loader.context.resource_ctxvar.set(target) + try: + assert salt.loader.context.resource_ctxvar.get() is target + finally: + salt.loader.context.resource_ctxvar.reset(tok) + # After reset the default is restored. + assert salt.loader.context.resource_ctxvar.get() == {} + + +def test_resource_ctxvar_thread_isolation(): + """ + Each thread gets an independent copy of resource_ctxvar. + + This is the core property that fixes Race 1: Thread A setting + resource_ctxvar to target_A must be invisible to Thread B, which sets it + to target_B, even when both threads share the same LazyLoader object. + """ + target_a = {"id": "dummy-01", "type": "dummy"} + target_b = {"id": "dummy-02", "type": "dummy"} + results = {} + + def worker(name, target, barrier): + salt.loader.context.resource_ctxvar.set(target) + # Both threads arrive here before either reads, maximising the + # chance of interference if isolation is broken. 
+ barrier.wait() + results[name] = salt.loader.context.resource_ctxvar.get() + + barrier = threading.Barrier(2) + t1 = threading.Thread(target=worker, args=("a", target_a, barrier)) + t2 = threading.Thread(target=worker, args=("b", target_b, barrier)) + t1.start() + t2.start() + t1.join() + t2.join() + + assert results["a"] is target_a + assert results["b"] is target_b + + +def test_resource_ctxvar_captured_by_copy_context(): + """ + copy_context() snapshots the current resource_ctxvar value. + + LazyLoader.run() calls copy_context() on every invocation, which is why + setting resource_ctxvar in _thread_return before the function executes is + sufficient for the value to be visible inside _run_as without any pack + mutation. + """ + target = {"id": "node1", "type": "ssh"} + tok = salt.loader.context.resource_ctxvar.set(target) + try: + ctx = contextvars.copy_context() + finally: + salt.loader.context.resource_ctxvar.reset(tok) + + # Outside the token the default is restored in *this* context. + assert salt.loader.context.resource_ctxvar.get() == {} + + # But the snapshot captured the value that was current at copy time. + seen = {} + ctx.run(lambda: seen.update({"val": salt.loader.context.resource_ctxvar.get()})) + assert seen["val"] is target + + +def test_named_loader_context_resource_bypasses_pack(): + """ + NamedLoaderContext.value() for __resource__ reads from resource_ctxvar, + not from the loader pack. + + This guarantees that concurrent threads using the same loader object each + see their own resource target regardless of what the shared pack contains. + """ + loader_context = salt.loader.context.LoaderContext() + named = loader_context.named_context("__resource__") + + # With no ctxvar set the default {} is returned even if there is no loader. + assert named.value() == {} + + # Set a target in the current context; the named context must reflect it. 
+ target = {"id": "dummy-03", "type": "dummy"} + tok = salt.loader.context.resource_ctxvar.set(target) + try: + assert named.value() is target + assert named["id"] == "dummy-03" + finally: + salt.loader.context.resource_ctxvar.reset(tok) + + # After reset the default is restored. + assert named.value() == {} + + +def test_resource_modules_packs_resource_dunder(): + """ + salt.loader.resource_modules must include ``"__resource__"`` in its pack + so that LazyLoader creates a NamedLoaderContext for it on every loaded + module. Without this, ``sshresource_state._resource_id()`` raises + ``NameError: name '__resource__' is not defined``. + """ + opts = { + "optimization_order": [0, 1, 2], + "extension_modules": "", + "fileserver_backend": ["roots"], + } + with ( + patch("salt.loader._module_dirs", return_value=[]), + patch("salt.loader.lazy.LazyLoader.__init__", return_value=None) as patched, + ): + salt.loader.resource_modules(opts, "ssh") + assert patched.called, "LazyLoader.__init__ was never called" + _, call_kwargs = patched.call_args + pack = call_kwargs.get("pack", {}) + assert "__resource__" in pack, ( + "resource_modules pack is missing '__resource__'; " + "sshresource_state will raise NameError at runtime" + ) diff --git a/tests/pytests/unit/matchers/test_resource_matchers.py b/tests/pytests/unit/matchers/test_resource_matchers.py new file mode 100644 index 000000000000..68c123de2a14 --- /dev/null +++ b/tests/pytests/unit/matchers/test_resource_matchers.py @@ -0,0 +1,119 @@ +""" +Tests for the T@ (resource_match) and M@ (managing_minion_match) matchers. 
+""" + +import pytest + +import salt.matchers.managing_minion_match as managing_minion_match +import salt.matchers.resource_match as resource_match + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + +RESOURCES = { + "dummy": ["dummy-01", "dummy-02", "dummy-03"], + "ssh": ["node1", "localhost"], +} + + +@pytest.fixture +def opts_with_resources(minion_opts): + minion_opts["id"] = "minion" + minion_opts["resources"] = RESOURCES + return minion_opts + + +@pytest.fixture +def opts_no_resources(minion_opts): + minion_opts["id"] = "minion" + minion_opts.pop("resources", None) + return minion_opts + + +# --------------------------------------------------------------------------- +# resource_match (T@) tests +# --------------------------------------------------------------------------- + + +def test_resource_match_bare_type_hits(opts_with_resources): + """T@dummy matches a minion that manages at least one dummy resource.""" + assert resource_match.match("dummy", opts=opts_with_resources) is True + + +def test_resource_match_bare_type_miss(opts_with_resources): + """T@vcf_host does not match when the minion has no vcf_host resources.""" + assert resource_match.match("vcf_host", opts=opts_with_resources) is False + + +def test_resource_match_full_srn_hits(opts_with_resources): + """T@dummy:dummy-01 matches when dummy-01 is in the dummy resource list.""" + assert resource_match.match("dummy:dummy-01", opts=opts_with_resources) is True + + +def test_resource_match_full_srn_miss_wrong_id(opts_with_resources): + """T@dummy:dummy-99 does not match — dummy-99 is not managed.""" + assert resource_match.match("dummy:dummy-99", opts=opts_with_resources) is False + + +def test_resource_match_full_srn_miss_wrong_type(opts_with_resources): + """T@vcf_host:dummy-01 does not match — type is wrong.""" + assert resource_match.match("vcf_host:dummy-01", 
opts=opts_with_resources) is False + + +def test_resource_match_ssh_type(opts_with_resources): + """T@ssh matches a minion that manages SSH resources.""" + assert resource_match.match("ssh", opts=opts_with_resources) is True + + +def test_resource_match_ssh_full_srn(opts_with_resources): + """T@ssh:node1 matches the specific SSH resource.""" + assert resource_match.match("ssh:node1", opts=opts_with_resources) is True + + +def test_resource_match_no_resources(opts_no_resources): + """T@dummy returns False when opts has no resources configured.""" + assert resource_match.match("dummy", opts=opts_no_resources) is False + + +def test_resource_match_empty_resources(minion_opts): + """T@dummy returns False when opts["resources"] is an empty dict.""" + minion_opts["resources"] = {} + assert resource_match.match("dummy", opts=minion_opts) is False + + +# --------------------------------------------------------------------------- +# managing_minion_match (M@) tests +# --------------------------------------------------------------------------- + + +def test_managing_minion_match_own_id(opts_with_resources): + """M@minion matches a minion with id='minion'.""" + assert managing_minion_match.match("minion", opts=opts_with_resources) is True + + +def test_managing_minion_match_different_id(opts_with_resources): + """M@other-minion does not match a minion with id='minion'.""" + assert ( + managing_minion_match.match("other-minion", opts=opts_with_resources) is False + ) + + +def test_managing_minion_match_empty_string(opts_with_resources): + """M@ with empty string does not match.""" + assert managing_minion_match.match("", opts=opts_with_resources) is False + + +def test_managing_minion_match_minion_id_kwarg(minion_opts): + """The minion_id kwarg overrides opts['id'] for the comparison.""" + minion_opts["id"] = "minion" + assert ( + managing_minion_match.match( + "override-id", opts=minion_opts, minion_id="override-id" + ) + is True + ) + assert ( + 
managing_minion_match.match("minion", opts=minion_opts, minion_id="override-id") + is False + ) diff --git a/tests/pytests/unit/modules/test_sshresource_state.py b/tests/pytests/unit/modules/test_sshresource_state.py new file mode 100644 index 000000000000..1d4c9febf0da --- /dev/null +++ b/tests/pytests/unit/modules/test_sshresource_state.py @@ -0,0 +1,361 @@ +""" +Unit tests for salt.modules.sshresource_state. + +Covers: +- highstate(): empty chunks → returns the 'no top file' state dict with + result=False rather than None/empty so the merge block displays it cleanly. +- _exec_state_pkg(): catches SSHCommandExecutionError and extracts the state + result dict from the exception's parsed data when it contains a valid return. + Re-raises the exception when the parsed data does not contain a valid state dict. +""" + +import pytest + +from tests.support.mock import MagicMock, patch + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +_RESOURCE_ID = "node1" + +_VALID_STATE_RETURN = { + "pkg_|-curl_|-curl_|-installed": { + "result": False, + "comment": "Package curl is not installed", + "name": "curl", + "changes": {}, + "__run_num__": 0, + } +} + +_BASE_OPTS = { + "id": "minion", + "resource_type": "ssh", + "cachedir": "/tmp", + "hash_type": "sha256", + "thin_dir": "/tmp/.test_salt", + "test": False, + "pillar": {}, +} + +_BASE_RESOURCE = {"id": _RESOURCE_ID, "type": "ssh"} + + +# --------------------------------------------------------------------------- +# _relenv_path(): returns tarball or None +# --------------------------------------------------------------------------- + + +class TestRelenvPath: + """_relenv_path() returns the first existing tarball or None.""" + + def _run(self, existing_files=()): + import salt.modules.sshresource_state as mod + + opts = _BASE_OPTS.copy() + with patch.object(mod, "__opts__", opts, create=True), patch.object( 
+ mod, "__resource__", dict(_BASE_RESOURCE), create=True + ), patch.object(mod, "__context__", {}, create=True), patch.object( + mod, "__salt__", {}, create=True + ), patch( + "os.path.exists", side_effect=lambda p: p in existing_files + ): + return mod._relenv_path() + + def test_returns_x86_64_when_present(self): + path = "/tmp/relenv/linux/x86_64/salt-relenv.tar.xz" + assert self._run(existing_files=(path,)) == path + + def test_returns_arm64_when_present(self): + path = "/tmp/relenv/linux/arm64/salt-relenv.tar.xz" + assert self._run(existing_files=(path,)) == path + + def test_returns_none_when_no_tarball(self): + assert self._run(existing_files=()) is None + + def test_prefers_x86_64_over_arm64(self): + x86 = "/tmp/relenv/linux/x86_64/salt-relenv.tar.xz" + arm = "/tmp/relenv/linux/arm64/salt-relenv.tar.xz" + assert self._run(existing_files=(x86, arm)) == x86 + + +def _make_ssh_error(parsed): + """Build a fake SSHCommandExecutionError with .parsed attribute.""" + import salt.client.ssh.wrapper + + return salt.client.ssh.wrapper.SSHCommandExecutionError( + "stdout", "stderr", 2, parsed=parsed + ) + + +# --------------------------------------------------------------------------- +# highstate(): empty chunks → no-top-file state dict +# --------------------------------------------------------------------------- + + +class TestHighstateEmptyChunks: + """highstate() with no top-file match must return a proper state dict.""" + + def _run_highstate(self): + import salt.modules.sshresource_state as mod + + opts = _BASE_OPTS.copy() + + mock_state = MagicMock() + mock_state.__enter__ = MagicMock(return_value=mock_state) + mock_state.__exit__ = MagicMock(return_value=False) + mock_state.opts = {"pillar": {}, "test": False} + mock_state.compile_low_chunks.return_value = [] # no chunks → no top file + + with patch.object(mod, "__opts__", opts, create=True), patch.object( + mod, "__resource__", dict(_BASE_RESOURCE), create=True + ), patch.object(mod, "__context__", {}, 
create=True), patch.object( + mod, "__salt__", {}, create=True + ), patch.object( + mod, "_target_opts", return_value=opts + ), patch.object( + mod, "_seed_thin_dir", return_value="/tmp/.test_salt" + ), patch.object( + mod, "_get_initial_pillar", return_value=None + ), patch.object( + mod, "_file_client", return_value=MagicMock() + ), patch( + "salt.client.ssh.state.SSHHighState", return_value=mock_state + ), patch( + "salt.utils.state.get_sls_opts", return_value=opts + ): + return mod.highstate() + + def test_returns_dict(self): + result = self._run_highstate() + assert isinstance(result, dict), f"Expected dict, got {type(result)}" + + def test_uses_no_state_key(self): + result = self._run_highstate() + assert "no_|-states_|-states_|-None" in result + + def test_result_is_false(self): + result = self._run_highstate() + entry = result["no_|-states_|-states_|-None"] + assert entry["result"] is False + + def test_comment_mentions_resource_id(self): + result = self._run_highstate() + comment = result["no_|-states_|-states_|-None"]["comment"] + assert _RESOURCE_ID in comment + + def test_changes_empty(self): + result = self._run_highstate() + assert result["no_|-states_|-states_|-None"]["changes"] == {} + + +# --------------------------------------------------------------------------- +# _exec_state_pkg(): SSHCommandExecutionError recovery +# --------------------------------------------------------------------------- + + +class TestExecStatePkg: + """_exec_state_pkg must recover valid state dicts from SSHCommandExecutionError.""" + + def _run(self, exc_parsed): + """ + Run _exec_state_pkg with a mocked Single that raises SSHCommandExecutionError. + Returns (result, context_dict). 
+ """ + import salt.modules.sshresource_state as mod + + opts = _BASE_OPTS.copy() + context = {} + exc = _make_ssh_error(exc_parsed) + + with patch.object(mod, "__opts__", opts, create=True), patch.object( + mod, "__resource__", dict(_BASE_RESOURCE), create=True + ), patch.object(mod, "__context__", context, create=True), patch.object( + mod, "__salt__", {}, create=True + ), patch.object( + mod, "_resource_id", return_value=_RESOURCE_ID + ), patch.object( + mod, "_relenv_path", return_value="/tmp/relenv.tar.xz" + ), patch.object( + mod, "_file_client", return_value=MagicMock() + ), patch.object( + mod, "_connection_kwargs", return_value={} + ), patch( + "salt.utils.hashutils.get_hash", return_value="abc123" + ), patch( + "os.remove" + ), patch( + "salt.client.ssh.Single" + ) as mock_single_cls, patch( + "salt.client.ssh.wrapper.parse_ret", side_effect=exc + ): + mock_single = MagicMock() + mock_single.cmd_block.return_value = ('{"local": {}}', "", 2) + mock_single.shell = MagicMock() + mock_single_cls.return_value = mock_single + + result = mod._exec_state_pkg(opts, "/tmp/fake.tgz", False) + return result, context + + def test_extracts_state_dict_from_exception(self): + parsed = {"local": {"return": _VALID_STATE_RETURN, "retcode": 2}} + result, _ = self._run(parsed) + assert result == _VALID_STATE_RETURN + + def test_sets_retcode_from_exception(self): + parsed = {"local": {"return": _VALID_STATE_RETURN, "retcode": 2}} + _, context = self._run(parsed) + assert context.get("retcode") == 2 + + def test_reraises_when_local_missing(self): + import salt.client.ssh.wrapper + + with pytest.raises(salt.client.ssh.wrapper.SSHCommandExecutionError): + self._run({}) # no "local" key + + def test_reraises_when_return_not_dict(self): + import salt.client.ssh.wrapper + + parsed = {"local": {"return": "raw string output", "retcode": 1}} + with pytest.raises(salt.client.ssh.wrapper.SSHCommandExecutionError): + self._run(parsed) + + def test_reraises_when_parsed_is_none(self): + 
import salt.client.ssh.wrapper + import salt.modules.sshresource_state as mod + + opts = _BASE_OPTS.copy() + context = {} + + exc = salt.client.ssh.wrapper.SSHCommandExecutionError( + "stdout", "stderr", 1, parsed=None + ) + + with patch.object(mod, "__opts__", opts, create=True), patch.object( + mod, "__resource__", dict(_BASE_RESOURCE), create=True + ), patch.object(mod, "__context__", context, create=True), patch.object( + mod, "__salt__", {}, create=True + ), patch.object( + mod, "_resource_id", return_value=_RESOURCE_ID + ), patch.object( + mod, "_relenv_path", return_value="/tmp/relenv.tar.xz" + ), patch.object( + mod, "_file_client", return_value=MagicMock() + ), patch.object( + mod, "_connection_kwargs", return_value={} + ), patch( + "salt.utils.hashutils.get_hash", return_value="abc123" + ), patch( + "os.remove" + ), patch( + "salt.client.ssh.Single" + ) as mock_single_cls, patch( + "salt.client.ssh.wrapper.parse_ret", side_effect=exc + ): + mock_single = MagicMock() + mock_single.cmd_block.return_value = ("", "", 1) + mock_single.shell = MagicMock() + mock_single_cls.return_value = mock_single + + with pytest.raises(salt.client.ssh.wrapper.SSHCommandExecutionError): + mod._exec_state_pkg(opts, "/tmp/fake.tgz", False) + + +# --------------------------------------------------------------------------- +# _exec_state_pkg(): normal (non-exception) path +# --------------------------------------------------------------------------- + + +class TestExecStatePkgNormalPath: + """_exec_state_pkg must unwrap the envelope dict returned by parse_ret.""" + + def _run(self, envelope): + import salt.modules.sshresource_state as mod + + opts = _BASE_OPTS.copy() + context = {} + + with patch.object(mod, "__opts__", opts, create=True), patch.object( + mod, "__resource__", dict(_BASE_RESOURCE), create=True + ), patch.object(mod, "__context__", context, create=True), patch.object( + mod, "__salt__", {}, create=True + ), patch.object( + mod, "_resource_id", 
return_value=_RESOURCE_ID + ), patch.object( + mod, "_relenv_path", return_value="/tmp/relenv.tar.xz" + ), patch.object( + mod, "_file_client", return_value=MagicMock() + ), patch.object( + mod, "_connection_kwargs", return_value={} + ), patch( + "salt.utils.hashutils.get_hash", return_value="abc123" + ), patch( + "os.remove" + ), patch( + "salt.client.ssh.Single" + ) as mock_single_cls, patch( + "salt.client.ssh.wrapper.parse_ret", return_value=envelope + ): + mock_single = MagicMock() + mock_single.cmd_block.return_value = ("", "", 0) + mock_single.shell = MagicMock() + mock_single_cls.return_value = mock_single + + result = mod._exec_state_pkg(opts, "/tmp/fake.tgz", False) + return result, context + + def test_returns_state_dict_from_envelope(self): + envelope = {"return": _VALID_STATE_RETURN, "retcode": 0} + result, _ = self._run(envelope) + assert result == _VALID_STATE_RETURN + + def test_sets_retcode_on_non_zero_envelope(self): + envelope = {"return": _VALID_STATE_RETURN, "retcode": 2} + _, context = self._run(envelope) + assert context.get("retcode") == 2 + + def test_zero_retcode_does_not_set_context_retcode(self): + """A clean run (retcode 0) must not inject a non-zero retcode into context.""" + envelope = {"return": _VALID_STATE_RETURN, "retcode": 0} + _, context = self._run(envelope) + assert context.get("retcode", 0) == 0 + + def test_single_receives_fsclient(self): + """Single must be constructed with a fsclient so cmd_block can call mod_data.""" + import salt.modules.sshresource_state as mod + + opts = _BASE_OPTS.copy() + mock_fsclient = MagicMock() + + with patch.object(mod, "__opts__", opts, create=True), patch.object( + mod, "__resource__", dict(_BASE_RESOURCE), create=True + ), patch.object(mod, "__context__", {}, create=True), patch.object( + mod, "__salt__", {}, create=True + ), patch.object( + mod, "_resource_id", return_value=_RESOURCE_ID + ), patch.object( + mod, "_relenv_path", return_value="/tmp/relenv.tar.xz" + ), patch.object( + mod, 
"_file_client", return_value=mock_fsclient + ), patch.object( + mod, "_connection_kwargs", return_value={} + ), patch( + "salt.utils.hashutils.get_hash", return_value="abc123" + ), patch( + "os.remove" + ), patch( + "salt.client.ssh.Single" + ) as mock_single_cls, patch( + "salt.client.ssh.wrapper.parse_ret", + return_value={"return": _VALID_STATE_RETURN, "retcode": 0}, + ): + mock_single = MagicMock() + mock_single.cmd_block.return_value = ("", "", 0) + mock_single.shell = MagicMock() + mock_single_cls.return_value = mock_single + + mod._exec_state_pkg(opts, "/tmp/fake.tgz", False) + + _, kwargs = mock_single_cls.call_args + assert kwargs.get("fsclient") is mock_fsclient diff --git a/tests/pytests/unit/resources/__init__.py b/tests/pytests/unit/resources/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/pytests/unit/resources/test_ssh_resource.py b/tests/pytests/unit/resources/test_ssh_resource.py new file mode 100644 index 000000000000..ed2a68b71876 --- /dev/null +++ b/tests/pytests/unit/resources/test_ssh_resource.py @@ -0,0 +1,115 @@ +""" +Unit tests for salt.resource.ssh. + +Covers the _make_single() helper which constructs a salt.client.ssh.Single +from inside a minion job thread — a code path that salt-ssh itself never +takes (salt-ssh always runs on the master). 
+""" + +import pytest + +from tests.support.mock import MagicMock, patch + +# --------------------------------------------------------------------------- +# Shared fixtures +# --------------------------------------------------------------------------- + +_RESOURCE_ID = "node1" + +_BASE_OPTS = { + "id": "minion", + "cachedir": "/tmp", + "thin_dir": "/tmp/_salt_relenv_test", + "ssh_wipe": False, + "file_roots": {"base": ["/srv/salt"]}, + "pillar_roots": {"base": ["/srv/pillar"]}, + "module_dirs": [], + # ext_pillar intentionally absent — it is a master-only key +} + +_BASE_RESOURCE = {"id": _RESOURCE_ID, "type": "ssh"} + +_HOST_CFG = { + "host": "192.0.2.1", + "user": "root", + "port": 22, +} + + +def _patch_module(mod, extra_context=None): + """Return a stack of patches that give the module its dunder variables.""" + context = {"ssh_resource": {"master_opts": None, "_ssh_version": "8.0"}} + if extra_context: + context["ssh_resource"].update(extra_context) + return [ + patch.object(mod, "__opts__", _BASE_OPTS.copy(), create=True), + patch.object(mod, "__resource__", dict(_BASE_RESOURCE), create=True), + patch.object(mod, "__context__", context, create=True), + patch.object(mod, "__salt__", {}, create=True), + ] + + +# --------------------------------------------------------------------------- +# _make_single passes fsclient to Single +# --------------------------------------------------------------------------- + + +class TestMakeSingle: + """_make_single() must pass a fsclient to Single. + + Single.cmd_block() calls mod_data(self.fsclient) unconditionally + (added in the upstream relenv improvements merge). If fsclient is + None that call raises: + + AttributeError: 'NoneType' object has no attribute 'opts' + + The fix is for _make_single to call _file_client() and forward the + result as the fsclient= keyword argument to Single.__init__. 
+ """ + + def test_single_receives_fsclient(self): + import contextlib + + import salt.resource.ssh as mod + + mock_fsclient = MagicMock() + mock_single_cls = MagicMock() + + fixed_patches = [ + patch.object(mod, "_host_cfg", return_value=_HOST_CFG), + patch.object(mod, "_relenv_path", return_value="/tmp/fake-relenv.tar.xz"), + patch.object(mod, "_file_client", return_value=mock_fsclient), + patch.object(mod, "_thin_dir", return_value="/tmp/_salt_relenv_test"), + patch("salt.client.ssh.Single", mock_single_cls), + patch("salt.client.ssh.ssh_version", return_value="8.0"), + ] + _patch_module(mod) + + with contextlib.ExitStack() as stack: + for p in fixed_patches: + stack.enter_context(p) + mod._make_single(_RESOURCE_ID, ["grains.items"]) + + _, kwargs = mock_single_cls.call_args + assert kwargs.get("fsclient") is mock_fsclient, ( + "_make_single must pass fsclient= to Single so that " + "cmd_block() can call mod_data(fsclient) without crashing" + ) + + def test_fsclient_none_would_crash(self): + """Confirm that omitting fsclient causes the crash this fix prevents. + + This test documents *why* the fix is needed: if fsclient is None, + mod_data() raises AttributeError on 'NoneType'.opts. + """ + + def fake_mod_data(fsclient): + if fsclient is None: + raise AttributeError("'NoneType' object has no attribute 'opts'") + return {} + + with patch("salt.client.ssh.mod_data", fake_mod_data): + with pytest.raises(AttributeError, match="NoneType.*opts"): + fake_mod_data(None) + + # With a real fsclient it works fine + assert fake_mod_data(MagicMock()) == {} diff --git a/tests/pytests/unit/test_minion_resources.py b/tests/pytests/unit/test_minion_resources.py new file mode 100644 index 000000000000..2caf69df5069 --- /dev/null +++ b/tests/pytests/unit/test_minion_resources.py @@ -0,0 +1,564 @@ +""" +Tests for resource dispatch logic in salt.minion. 
+ +Covers: +- Minion._resolve_resource_targets(): what resource jobs each minion spawns +- gen_modules() atomic resource_loaders assignment: Race 2 fix +- resource_ctxvar injection in _thread_return: Race 1 fix +""" + +import threading + +import pytest + +import salt.loader.context +import salt.minion +from tests.support.mock import patch as mock_patch + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + +RESOURCES = { + "dummy": ["dummy-01", "dummy-02", "dummy-03"], + "ssh": ["node1", "localhost"], +} + + +@pytest.fixture +def minion_with_resources(minion_opts): + """A Minion instance with resources configured, no real master connection.""" + minion_opts["resources"] = RESOURCES + minion_opts["multiprocessing"] = False + with mock_patch("salt.minion.Minion.gen_modules"): + with mock_patch("salt.minion.Minion.connect_master"): + m = salt.minion.Minion(minion_opts, load_grains=False) + return m + + +# --------------------------------------------------------------------------- +# _resolve_resource_targets tests +# --------------------------------------------------------------------------- + + +def test_resolve_resource_targets_glob_wildcard(minion_with_resources): + """ + A broad glob ('*') with no resource-aware tgt_type returns all managed + resources so that every resource job is dispatched. + """ + load = {"tgt": "*", "tgt_type": "glob", "fun": "test.ping", "arg": []} + targets = minion_with_resources._resolve_resource_targets(load) + ids = [t["id"] for t in targets] + assert set(ids) == {"dummy-01", "dummy-02", "dummy-03", "node1", "localhost"} + types = {t["type"] for t in targets} + assert types == {"dummy", "ssh"} + + +def test_resolve_resource_targets_glob_specific_minion(minion_with_resources): + """ + A specific name glob (no wildcard characters) must NOT dispatch to + resources. 
``salt 'minion' test.ping`` should only run on the minion + itself, not on its managed resources. + """ + load = {"tgt": "minion", "tgt_type": "glob", "fun": "test.ping", "arg": []} + targets = minion_with_resources._resolve_resource_targets(load) + assert targets == [], "specific-name glob must not dispatch to resources" + + +def test_resolve_resource_targets_compound_T_full_srn(minion_with_resources): + """T@dummy:dummy-01 in a compound expression returns exactly that resource.""" + load = { + "tgt": "T@dummy:dummy-01", + "tgt_type": "compound", + "fun": "test.ping", + "arg": [], + } + targets = minion_with_resources._resolve_resource_targets(load) + assert targets == [{"id": "dummy-01", "type": "dummy"}] + + +def test_resolve_resource_targets_compound_T_bare_type(minion_with_resources): + """T@dummy returns all dummy resources.""" + load = { + "tgt": "T@dummy", + "tgt_type": "compound", + "fun": "test.ping", + "arg": [], + } + targets = minion_with_resources._resolve_resource_targets(load) + ids = [t["id"] for t in targets] + assert set(ids) == {"dummy-01", "dummy-02", "dummy-03"} + assert all(t["type"] == "dummy" for t in targets) + + +def test_resolve_resource_targets_compound_no_T(minion_with_resources): + """A compound expression with no T@ or M@ terms dispatches no resource jobs.""" + load = { + "tgt": "G@os:Debian", + "tgt_type": "compound", + "fun": "test.ping", + "arg": [], + } + targets = minion_with_resources._resolve_resource_targets(load) + assert targets == [] + + +def test_resolve_resource_targets_no_resources(minion_opts): + """A minion with no resources configured never dispatches resource jobs.""" + minion_opts.pop("resources", None) + with mock_patch("salt.minion.Minion.gen_modules"): + with mock_patch("salt.minion.Minion.connect_master"): + m = salt.minion.Minion(minion_opts, load_grains=False) + load = {"tgt": "*", "tgt_type": "glob", "fun": "test.ping", "arg": []} + assert m._resolve_resource_targets(load) == [] + + +def 
test_resolve_resource_targets_no_resource_funs(minion_with_resources): + """ + Internal Salt plumbing functions are never dispatched to resources, even + for a wildcard target. + """ + for fun in salt.minion.Minion._NO_RESOURCE_FUNS: + load = {"tgt": "*", "tgt_type": "glob", "fun": fun, "arg": []} + assert minion_with_resources._resolve_resource_targets(load) == [], fun + + +def test_resolve_resource_targets_T_with_trailing_colon(minion_with_resources): + """T@dummy: (trailing colon) is treated as a bare type, not a specific ID.""" + load = { + "tgt": "T@dummy:", + "tgt_type": "compound", + "fun": "test.ping", + "arg": [], + } + targets = minion_with_resources._resolve_resource_targets(load) + ids = {t["id"] for t in targets} + assert ids == {"dummy-01", "dummy-02", "dummy-03"} + + +# --------------------------------------------------------------------------- +# gen_modules() atomic resource_loaders assignment (Race 2 fix) +# --------------------------------------------------------------------------- + + +def test_gen_modules_resource_loaders_atomic_assignment(): + """ + The atomic build-then-assign pattern ensures self.resource_loaders is + never transiently empty between gen_modules() calls. + + We verify this by confirming that in the gen_modules source the actual + assignment ``self.resource_loaders = _new_resource_loaders`` appears, and + that no bare ``self.resource_loaders = {}`` statement (outside comments) + is present. + """ + import inspect + + source = inspect.getsource(salt.minion.MinionBase.gen_modules) + # The safe atomic-assign terminal statement must be present. + assert "self.resource_loaders = _new_resource_loaders" in source + # Verify no executable bare-clear line exists (comments are OK). 
+ executable_lines = [ + ln for ln in source.splitlines() if not ln.lstrip().startswith("#") + ] + assert not any( + "self.resource_loaders = {}" in ln for ln in executable_lines + ), "Found bare self.resource_loaders = {} outside a comment — Race 2 regression" + + +# --------------------------------------------------------------------------- +# resource_ctxvar injection in _thread_return (Race 1 fix) +# --------------------------------------------------------------------------- + + +def test_resource_ctxvar_set_before_function_executes(): + """ + _thread_return sets resource_ctxvar to the resource_target dict before the + job function runs. This test simulates the critical section and confirms + the ctxvar carries the right value into a copy_context() snapshot — the + mechanism that makes the fix thread-safe. + """ + import contextvars + + target = {"id": "dummy-01", "type": "dummy"} + + # Simulate what _thread_return does. + tok = salt.loader.context.resource_ctxvar.set(target) + try: + # copy_context() is what LazyLoader.run() calls on every invocation. + ctx = contextvars.copy_context() + finally: + salt.loader.context.resource_ctxvar.reset(tok) + + # The copy captured the value; the current context is back to default. + assert salt.loader.context.resource_ctxvar.get() == {} + + # But inside the snapshot the target is visible — exactly as it will be + # inside _run_as when the module function reads __resource__. + seen = {} + ctx.run(lambda: seen.update({"val": salt.loader.context.resource_ctxvar.get()})) + assert seen["val"] is target + + +def test_resource_ctxvar_concurrent_threads_isolated(): + """ + Two threads setting resource_ctxvar concurrently never see each other's + values. This directly validates the fix for Race 1 (KeyError: 'id'). 
+ """ + target_a = {"id": "dummy-01", "type": "dummy"} + target_b = {"id": "dummy-02", "type": "dummy"} + errors = [] + results = {} + barrier = threading.Barrier(2) + + def run_job(name, target): + try: + salt.loader.context.resource_ctxvar.set(target) + barrier.wait() # both threads set before either reads + val = salt.loader.context.resource_ctxvar.get() + if val is not target: + errors.append( + f"Thread {name}: expected {target['id']}, got {val.get('id')}" + ) + results[name] = val + except Exception as exc: # pylint: disable=broad-except + errors.append(str(exc)) + + t1 = threading.Thread(target=run_job, args=("a", target_a)) + t2 = threading.Thread(target=run_job, args=("b", target_b)) + t1.start() + t2.start() + t1.join() + t2.join() + + assert not errors, errors + assert results["a"] is target_a + assert results["b"] is target_b + + +# --------------------------------------------------------------------------- +# _discover_resources tests +# --------------------------------------------------------------------------- + + +def test_discover_resources_no_pillar_key_clears_like_empty(minion_with_resources): + """ + When the pillar contains no 'resources' key at all, _discover_resources + must behave like pillar['resources'] == {}: return {} and not preserve + stale opts["resources"]. + """ + minion_with_resources.opts["pillar"] = {} + result = minion_with_resources._discover_resources() + assert result == {} + + +def test_discover_resources_empty_pillar_key_clears_opts(minion_with_resources): + """ + When the pillar *does* contain a 'resources' key but its value is empty, + _discover_resources must return {} and NOT preserve the old opts resources. + This is the fix for the stale-cache bug: removing a resource type from the + pillar and running sync_all must clear it at runtime. 
+ """ + minion_with_resources.opts["pillar"] = {"resources": {}} + result = minion_with_resources._discover_resources() + assert ( + result == {} + ), "_discover_resources must return {} when pillar['resources'] is empty" + + +# --------------------------------------------------------------------------- +# _register_resources_with_master tests +# --------------------------------------------------------------------------- + + +def test_register_resources_with_master_sends_empty_dict(minion_with_resources): + """ + _register_resources_with_master must send the registration even when + opts["resources"] is {}. Without this, removing resources from the pillar + and running sync_all leaves the master cache permanently stale. + """ + minion_with_resources.opts["resources"] = {} + sent_loads = [] + + async def fake_send(load, timeout=None): + sent_loads.append(load) + + import asyncio + + minion_with_resources.tok = b"test-tok" + with mock_patch.object( + minion_with_resources, + "_send_req_async_main", + side_effect=fake_send, + ): + asyncio.run(minion_with_resources._register_resources_with_master()) + + assert ( + len(sent_loads) == 1 + ), "_register_resources_with_master must always send a load, even for {}" + assert ( + sent_loads[0]["resources"] == {} + ), "An empty resource dict must be forwarded to the master to clear stale cache" + + +# --------------------------------------------------------------------------- +# _MERGE_RESOURCE_FUNS tests +# --------------------------------------------------------------------------- + + +def test_merge_resource_funs_contains_expected_state_functions(): + """All state-dispatch functions that should trigger merge mode are present.""" + expected = { + "state.apply", + "state.highstate", + "state.sls", + "state.sls_id", + "state.single", + } + assert expected <= salt.minion.Minion._MERGE_RESOURCE_FUNS + + +def test_merge_resource_funs_does_not_contain_test_ping(): + """test.ping must NOT be in _MERGE_RESOURCE_FUNS so it dispatches 
normally.""" + assert "test.ping" not in salt.minion.Minion._MERGE_RESOURCE_FUNS + + +def test_merge_resource_funs_is_frozenset(): + assert isinstance(salt.minion.Minion._MERGE_RESOURCE_FUNS, frozenset) + + +def test_merge_resource_funs_minions_and_minion_in_sync(): + """_MERGE_RESOURCE_FUNS must be identical in salt.minion and salt.utils.minions.""" + import salt.utils.minions as _minions_mod + + assert salt.minion.Minion._MERGE_RESOURCE_FUNS == _minions_mod._MERGE_RESOURCE_FUNS + + +# --------------------------------------------------------------------------- +# _prefix_resource_state_key tests +# --------------------------------------------------------------------------- + + +def test_prefix_resource_state_key_id_and_name_prefixed(): + """Both the id (comps[1]) and name (comps[2]) components gain the rid prefix.""" + key = "pkg_|-curl_|-curl_|-installed" + result = salt.minion.Minion._prefix_resource_state_key(key, "node1") + assert result == "pkg_|-node1 curl_|-node1 curl_|-installed" + + +def test_prefix_resource_state_key_preserves_module_and_function(): + """comps[0] (module) and comps[3] (function) are unchanged.""" + key = "pkg_|-curl_|-curl_|-installed" + result = salt.minion.Minion._prefix_resource_state_key(key, "node1") + parts = result.split("_|-") + assert parts[0] == "pkg" + assert parts[3] == "installed" + + +def test_prefix_resource_state_key_id_with_spaces(): + """Resource IDs containing spaces are handled correctly.""" + key = "service_|-nginx_|-nginx_|-running" + result = salt.minion.Minion._prefix_resource_state_key(key, "my host") + assert result == "service_|-my host nginx_|-my host nginx_|-running" + + +def test_prefix_resource_state_key_no_top_file_key(): + """The 'no_|-states_|-states_|-None' key used for empty-top returns is prefixed.""" + key = "no_|-states_|-states_|-None" + result = salt.minion.Minion._prefix_resource_state_key(key, "node1") + assert result == "no_|-node1 states_|-node1 states_|-None" + + +def 
test_prefix_resource_state_key_malformed_key_falls_back(): + """A key that cannot be split into 4 parts produces the fallback synthetic key.""" + result = salt.minion.Minion._prefix_resource_state_key("not-a-state-key", "node1") + assert result == "no_|-node1_|-node1_|-None" + + +def test_prefix_resource_state_key_three_part_key_falls_back(): + """Only three _|- separators → fallback.""" + key = "pkg_|-curl_|-curl" + result = salt.minion.Minion._prefix_resource_state_key(key, "node1") + assert result == "no_|-node1_|-node1_|-None" + + +# --------------------------------------------------------------------------- +# _handle_payload merge-mode guard (source inspection) +# --------------------------------------------------------------------------- + + +def test_handle_payload_skips_resource_dispatch_for_merge_funs(): + """ + _handle_payload must guard the separate resource-dispatch block with a + 'fun not in _MERGE_RESOURCE_FUNS' check. A missing guard would cause + duplicate responses for state.apply jobs. + """ + import inspect + + source = inspect.getsource(salt.minion.Minion._handle_payload) + assert "_MERGE_RESOURCE_FUNS" in source, ( + "_handle_payload must reference _MERGE_RESOURCE_FUNS to skip " + "redundant resource job dispatch for merge-mode functions" + ) + assert "resource_targets" in source + + +# --------------------------------------------------------------------------- +# Merge block helper: _merge_resource_into_ret logic +# --------------------------------------------------------------------------- + + +def _make_ret(return_val=None, retcode=0): + """Build a minimal ret dict as produced by _thread_return.""" + return { + "return": return_val if return_val is not None else {}, + "retcode": retcode, + "success": retcode == 0, + } + + +def _run_merge_block( + minion_instance, resource, resource_loader, function_name, resource_return +): + """ + Simulate the per-resource section of _thread_return's merge block. 
+ + Drives the same if/elif/else branches: + - resource_loader is None → no-loader synthetic entry + - function_name not in resource_loader → unsupported string + - resource_return is a dict → prefix keys and merge + - resource_return is a str → synthetic entry with result False + """ + import salt.defaults.exitcodes + + ret = _make_ret() + run_num_base = 0 + rid = resource["id"] + rtype = resource["type"] + + if resource_loader is None: + ret["return"][f"no_|-{rid}_|-{rid}_|-None"] = { + "result": False, + "comment": f"No resource loader for type '{rtype}'. Ensure the resource module exists.", + "name": rid, + "changes": {}, + "__run_num__": run_num_base, + } + run_num_base += 1 + if ret.get("retcode") == salt.defaults.exitcodes.EX_OK: + ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC + elif function_name not in resource_loader: + resource_return = ( + f"Function '{function_name}' is not supported for resource type '{rtype}'. " + f"Implement it in a '{rtype}resource_*' execution module." 
+ ) + ret["return"][f"no_|-{rid}_|-{rid}_|-None"] = { + "result": False, + "comment": str(resource_return), + "name": rid, + "changes": {}, + "__run_num__": run_num_base, + } + run_num_base += 1 + if ret.get("retcode") == salt.defaults.exitcodes.EX_OK: + ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC + elif isinstance(resource_return, dict): + for state_id, state_val in resource_return.items(): + entry = ( + dict(state_val) + if isinstance(state_val, dict) + else { + "result": True, + "comment": str(state_val), + "name": rid, + "changes": {}, + } + ) + entry["__run_num__"] = run_num_base + run_num_base += 1 + ret["return"][ + salt.minion.Minion._prefix_resource_state_key(state_id, rid) + ] = entry + if ret.get("retcode") == salt.defaults.exitcodes.EX_OK: + # retcode only updated when resource_loader context signals failure + pass + else: + ret["return"][f"no_|-{rid}_|-{rid}_|-None"] = { + "result": False, + "comment": str(resource_return), + "name": rid, + "changes": {}, + "__run_num__": run_num_base, + } + run_num_base += 1 + if ret.get("retcode") == salt.defaults.exitcodes.EX_OK: + ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC + + ret["success"] = ret.get("retcode") == salt.defaults.exitcodes.EX_OK + return ret + + +class _FakeLoader(dict): + """Minimal stand-in for a resource loader (just a dict with a pack).""" + + def __init__(self, funs): + super().__init__(funs) + self.pack = {"__context__": {}} + + +def test_merge_block_no_loader_produces_false_entry(minion_with_resources): + resource = {"id": "dummy-01", "type": "dummy"} + ret = _run_merge_block(minion_with_resources, resource, None, "state.apply", None) + key = "no_|-dummy-01_|-dummy-01_|-None" + assert key in ret["return"] + assert ret["return"][key]["result"] is False + assert "No resource loader" in ret["return"][key]["comment"] + assert ret["retcode"] != 0 + assert ret["success"] is False + + +def test_merge_block_unsupported_function_produces_false_entry(minion_with_resources): + resource 
= {"id": "dummy-01", "type": "dummy"} + loader = _FakeLoader({}) # empty — function not present + ret = _run_merge_block(minion_with_resources, resource, loader, "state.apply", None) + key = "no_|-dummy-01_|-dummy-01_|-None" + assert key in ret["return"] + assert ret["return"][key]["result"] is False + assert "not supported" in ret["return"][key]["comment"] + assert ret["retcode"] != 0 + + +def test_merge_block_dict_return_prefixes_keys(minion_with_resources): + resource = {"id": "node1", "type": "ssh"} + loader = _FakeLoader({"state.apply": lambda: {}}) + resource_return = { + "pkg_|-curl_|-curl_|-installed": { + "result": True, + "comment": "Already installed", + "name": "curl", + "changes": {}, + } + } + ret = _run_merge_block( + minion_with_resources, resource, loader, "state.apply", resource_return + ) + assert "pkg_|-node1 curl_|-node1 curl_|-installed" in ret["return"] + assert ( + "pkg_|-curl_|-curl_|-installed" not in ret["return"] + ), "un-prefixed key must not appear" + + +def test_merge_block_string_return_produces_false_entry(minion_with_resources): + resource = {"id": "node1", "type": "ssh"} + loader = _FakeLoader({"state.apply": lambda: "some error"}) + ret = _run_merge_block( + minion_with_resources, + resource, + loader, + "state.apply", + "ERROR running state.apply", + ) + key = "no_|-node1_|-node1_|-None" + assert key in ret["return"] + assert ret["return"][key]["result"] is False + assert ret["retcode"] != 0 + assert ret["success"] is False diff --git a/tests/pytests/unit/utils/test_minions_resources.py b/tests/pytests/unit/utils/test_minions_resources.py new file mode 100644 index 000000000000..2898ad21e2f5 --- /dev/null +++ b/tests/pytests/unit/utils/test_minions_resources.py @@ -0,0 +1,442 @@ +""" +Tests for resource-aware targeting in salt.utils.minions. 
+ +Covers: +- _build_resource_index(): constructs the flat three-way index +- _get_resource_index(): in-process caching with TTL +- _update_resource_index(): atomic update of index + cache +- CkMinions._augment_with_resources(): adds resource IDs to wildcard results +- CkMinions._check_resource_minions(): resolves T@ expressions +- check_minions() wildcard-augmentation conditional logic +""" + +import pytest + +import salt.utils.minions +from salt.utils.minions import ( + _MERGE_RESOURCE_FUNS, + _RESOURCE_INDEX_BANK, + _RESOURCE_INDEX_KEY, + _build_resource_index, + _get_resource_index, + _update_resource_index, +) +from tests.support.mock import MagicMock, patch + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + +MINION_RESOURCES = { + "minion": { + "dummy": ["dummy-01", "dummy-02", "dummy-03"], + "ssh": ["node1", "localhost"], + } +} + +FLAT_INDEX = _build_resource_index(MINION_RESOURCES) + + +@pytest.fixture(autouse=True) +def reset_resource_index(): + """Reset the module-level index before each test.""" + salt.utils.minions._resource_index = {"by_id": {}, "by_type": {}, "by_minion": {}} + salt.utils.minions._resource_index_ts = 0.0 + yield + salt.utils.minions._resource_index = {"by_id": {}, "by_type": {}, "by_minion": {}} + salt.utils.minions._resource_index_ts = 0.0 + + +@pytest.fixture +def mock_cache(): + """A cache mock that returns the flat index.""" + cache = MagicMock() + cache.fetch.return_value = FLAT_INDEX + cache.store.return_value = None + return cache + + +@pytest.fixture +def empty_cache(): + """A cache mock that returns nothing.""" + cache = MagicMock() + cache.fetch.return_value = {} + cache.store.return_value = None + return cache + + +@pytest.fixture +def ck(master_opts): + """CkMinions instance with a mocked cache (empty index).""" + instance = salt.utils.minions.CkMinions(master_opts) + instance.cache = MagicMock() + 
instance.cache.fetch.return_value = {} + instance.cache.store.return_value = None + return instance + + +@pytest.fixture +def ck_with_resources(master_opts): + """CkMinions with the flat resource index populated.""" + instance = salt.utils.minions.CkMinions(master_opts) + instance.cache = MagicMock() + instance.cache.fetch.return_value = FLAT_INDEX + instance.cache.store.return_value = None + return instance + + +# --------------------------------------------------------------------------- +# _build_resource_index tests +# --------------------------------------------------------------------------- + + +def test_build_resource_index_by_id(): + index = _build_resource_index(MINION_RESOURCES) + assert index["by_id"]["dummy-01"] == {"minion": "minion", "type": "dummy"} + assert index["by_id"]["node1"] == {"minion": "minion", "type": "ssh"} + + +def test_build_resource_index_by_type(): + index = _build_resource_index(MINION_RESOURCES) + assert set(index["by_type"]["dummy"]) == {"dummy-01", "dummy-02", "dummy-03"} + assert set(index["by_type"]["ssh"]) == {"node1", "localhost"} + + +def test_build_resource_index_by_minion(): + index = _build_resource_index(MINION_RESOURCES) + assert index["by_minion"]["minion"]["dummy"] == ["dummy-01", "dummy-02", "dummy-03"] + + +def test_build_resource_index_empty(): + index = _build_resource_index({}) + assert index == {"by_id": {}, "by_type": {}, "by_minion": {}} + + +# --------------------------------------------------------------------------- +# _get_resource_index tests +# --------------------------------------------------------------------------- + + +def test_get_resource_index_loads_from_cache(mock_cache): + index = _get_resource_index(mock_cache) + mock_cache.fetch.assert_called_once_with(_RESOURCE_INDEX_BANK, _RESOURCE_INDEX_KEY) + assert index["by_id"]["dummy-01"]["type"] == "dummy" + + +def test_get_resource_index_caches_in_process(mock_cache): + _get_resource_index(mock_cache) + _get_resource_index(mock_cache) + assert ( + 
mock_cache.fetch.call_count == 1 + ), "cache.fetch should only be called once within TTL" + + +def test_get_resource_index_refreshes_after_ttl(mock_cache): + _get_resource_index(mock_cache) + salt.utils.minions._resource_index_ts = 0.0 # force TTL expiry + _get_resource_index(mock_cache) + assert mock_cache.fetch.call_count == 2 + + +def test_get_resource_index_cache_error_returns_empty(): + bad_cache = MagicMock() + bad_cache.fetch.side_effect = Exception("cache unavailable") + index = _get_resource_index(bad_cache) + assert index == {"by_id": {}, "by_type": {}, "by_minion": {}} + + +# --------------------------------------------------------------------------- +# _update_resource_index tests +# --------------------------------------------------------------------------- + + +def test_update_resource_index_adds_minion(mock_cache): + new_resources = {"dummy": ["dummy-99"]} + _update_resource_index(mock_cache, "minion-b", new_resources) + assert "dummy-99" in salt.utils.minions._resource_index["by_id"] + assert "minion-b" in salt.utils.minions._resource_index["by_minion"] + + +def test_update_resource_index_removes_minion(mock_cache): + salt.utils.minions._resource_index = _build_resource_index(MINION_RESOURCES) + _update_resource_index(mock_cache, "minion", {}) + assert "minion" not in salt.utils.minions._resource_index["by_minion"] + assert "dummy-01" not in salt.utils.minions._resource_index["by_id"] + assert "dummy" not in salt.utils.minions._resource_index["by_type"] + + +def test_update_resource_index_persists_to_cache(mock_cache): + _update_resource_index(mock_cache, "minion", {"dummy": ["dummy-01"]}) + mock_cache.store.assert_called_once_with( + _RESOURCE_INDEX_BANK, _RESOURCE_INDEX_KEY, salt.utils.minions._resource_index + ) + + +def test_update_resource_index_surgical_preserves_other_minions(mock_cache): + """ + Updating one minion must not disturb other minions' entries — this verifies + the surgical O(r) update does not rebuild from scratch. 
+ """ + two_minions = { + "minion-a": {"dummy": ["dummy-01", "dummy-02"]}, + "minion-b": {"ssh": ["node1"]}, + } + salt.utils.minions._resource_index = _build_resource_index(two_minions) + # Update only minion-a, removing dummy-01 + _update_resource_index(mock_cache, "minion-a", {"dummy": ["dummy-02"]}) + index = salt.utils.minions._resource_index + assert "dummy-01" not in index["by_id"] + assert "dummy-02" in index["by_id"] + # minion-b must be untouched + assert "node1" in index["by_id"] + assert index["by_id"]["node1"]["minion"] == "minion-b" + assert "minion-b" in index["by_minion"] + + +def test_update_resource_index_removes_empty_type(mock_cache): + """ + When the last resource of a type is removed the by_type entry must be + deleted entirely, not left as an empty list. + """ + salt.utils.minions._resource_index = _build_resource_index( + {"minion": {"ssh": ["node1"]}} + ) + _update_resource_index(mock_cache, "minion", {}) + assert "ssh" not in salt.utils.minions._resource_index["by_type"] + + +def test_update_resource_index_partial_type_removal(mock_cache): + """ + Removing one resource of a type must leave the remaining resources of that + type intact in by_type. + """ + salt.utils.minions._resource_index = _build_resource_index( + {"minion": {"dummy": ["dummy-01", "dummy-02"]}} + ) + _update_resource_index(mock_cache, "minion", {"dummy": ["dummy-02"]}) + index = salt.utils.minions._resource_index + assert "dummy-01" not in index["by_type"]["dummy"] + assert "dummy-02" in index["by_type"]["dummy"] + + +def test_update_resource_index_no_duplicate_by_type(mock_cache): + """ + Re-registering the same resources must not produce duplicates in by_type. 
+ """ + salt.utils.minions._resource_index = _build_resource_index( + {"minion": {"dummy": ["dummy-01"]}} + ) + _update_resource_index(mock_cache, "minion", {"dummy": ["dummy-01"]}) + assert salt.utils.minions._resource_index["by_type"]["dummy"].count("dummy-01") == 1 + + +# --------------------------------------------------------------------------- +# _augment_with_resources tests +# --------------------------------------------------------------------------- + + +def test_augment_with_resources_adds_resource_ids(ck_with_resources): + result = ck_with_resources._augment_with_resources(["minion"]) + assert "dummy-01" in result + assert "node1" in result + assert "minion" in result + + +def test_augment_with_resources_no_duplication(ck_with_resources): + result = ck_with_resources._augment_with_resources(["minion"]) + assert result.count("minion") == 1 + + +def test_augment_with_resources_empty_index(ck): + result = ck._augment_with_resources(["minion"]) + assert result == ["minion"] + + +def test_augment_with_resources_unmatched_minion(ck_with_resources): + result = ck_with_resources._augment_with_resources(["other-minion"]) + assert result == ["other-minion"] + + +def test_augment_with_resources_index_error_returns_minion_ids(ck): + ck.cache.fetch.side_effect = Exception("cache unavailable") + result = ck._augment_with_resources(["minion"]) + assert result == ["minion"] + + +# --------------------------------------------------------------------------- +# _check_resource_minions tests +# --------------------------------------------------------------------------- + + +def test_check_resource_minions_full_srn(ck_with_resources): + result = ck_with_resources._check_resource_minions("dummy:dummy-01", greedy=True) + assert result == {"minions": ["dummy-01"], "missing": []} + + +def test_check_resource_minions_all_of_type(ck_with_resources): + result = ck_with_resources._check_resource_minions("dummy", greedy=True) + assert set(result["minions"]) == {"dummy-01", 
"dummy-02", "dummy-03"} + + +def test_check_resource_minions_fallback_no_cache(ck): + result = ck._check_resource_minions("dummy:dummy-01", greedy=True) + assert result == {"minions": ["dummy-01"], "missing": []} + + +def test_check_resource_minions_empty_cache_bare_type(ck): + result = ck._check_resource_minions("dummy", greedy=True) + assert result == {"minions": [], "missing": []} + + +def test_check_resource_minions_trailing_colon(ck_with_resources): + result = ck_with_resources._check_resource_minions("dummy:", greedy=True) + assert set(result["minions"]) == {"dummy-01", "dummy-02", "dummy-03"} + + +# --------------------------------------------------------------------------- +# check_minions integration tests +# --------------------------------------------------------------------------- + + +def test_check_minions_glob_wildcard_augmented(ck_with_resources): + with patch.object( + ck_with_resources, + "_check_glob_minions", + return_value={"minions": ["minion"], "missing": []}, + ): + result = ck_with_resources.check_minions("*", tgt_type="glob") + assert "dummy-01" in result["minions"] + assert "node1" in result["minions"] + + +def test_check_minions_glob_specific_not_augmented(ck_with_resources): + with patch.object( + ck_with_resources, + "_check_glob_minions", + return_value={"minions": ["minion"], "missing": []}, + ): + result = ck_with_resources.check_minions("minion", tgt_type="glob") + assert "dummy-01" not in result["minions"] + + +def test_check_minions_compound_not_augmented(ck_with_resources): + with patch.object( + ck_with_resources, + "_check_compound_minions", + return_value={"minions": ["minion"], "missing": []}, + ): + result = ck_with_resources.check_minions( + "minion and G@os:Debian", tgt_type="compound" + ) + assert "dummy-01" not in result["minions"] + + +def test_augment_cache_error_does_not_break_check_minions(ck): + ck.cache.fetch.side_effect = Exception("cache driver failure") + with patch.object( + ck, + "_check_glob_minions", + 
return_value={"minions": ["minion"], "missing": []}, + ): + result = ck.check_minions("*", tgt_type="glob") + assert "minion" in result["minions"] + + +# --------------------------------------------------------------------------- +# check_minions merge-mode conditional tests +# --------------------------------------------------------------------------- + + +def test_check_minions_merge_fun_skips_augmentation(ck_with_resources): + """ + When fun is a merge-mode function (e.g. state.apply) a wildcard glob must + NOT augment the minion list with resource IDs. Resources are executed + inline by the managing minion and must not appear as separate job targets. + """ + with patch.object( + ck_with_resources, + "_check_glob_minions", + return_value={"minions": ["minion"], "missing": []}, + ): + result = ck_with_resources.check_minions( + "*", tgt_type="glob", fun="state.apply" + ) + assert "dummy-01" not in result["minions"] + assert "node1" not in result["minions"] + assert "minion" in result["minions"] + + +def test_check_minions_merge_fun_all_merge_funs_skip(ck_with_resources): + """All functions in _MERGE_RESOURCE_FUNS must skip resource augmentation.""" + for fun in _MERGE_RESOURCE_FUNS: + with patch.object( + ck_with_resources, + "_check_glob_minions", + return_value={"minions": ["minion"], "missing": []}, + ): + result = ck_with_resources.check_minions("*", tgt_type="glob", fun=fun) + assert "dummy-01" not in result["minions"], f"augmented for {fun}" + + +def test_check_minions_list_fun_still_augments(ck_with_resources): + """ + A multifunction job passes fun as a list. A list is not hashable and must + not cause a TypeError — augmentation must proceed normally. 
+ """ + with patch.object( + ck_with_resources, + "_check_glob_minions", + return_value={"minions": ["minion"], "missing": []}, + ): + result = ck_with_resources.check_minions( + "*", tgt_type="glob", fun=["test.arg", "test.arg"] + ) + assert "dummy-01" in result["minions"] + assert "minion" in result["minions"] + + +def test_check_minions_non_merge_fun_still_augments(ck_with_resources): + """ + A non-merge function such as test.ping with a wildcard glob must still + receive the full augmented list including resource IDs. + """ + with patch.object( + ck_with_resources, + "_check_glob_minions", + return_value={"minions": ["minion"], "missing": []}, + ): + result = ck_with_resources.check_minions("*", tgt_type="glob", fun="test.ping") + assert "dummy-01" in result["minions"] + assert "node1" in result["minions"] + + +def test_check_minions_no_fun_still_augments(ck_with_resources): + """ + Calling check_minions without fun (backward-compatible default None) + must still augment a wildcard glob — behaviour must be unchanged for + all existing call sites that do not pass fun. + """ + with patch.object( + ck_with_resources, + "_check_glob_minions", + return_value={"minions": ["minion"], "missing": []}, + ): + result = ck_with_resources.check_minions("*", tgt_type="glob") + assert "dummy-01" in result["minions"] + assert "node1" in result["minions"] + + +def test_check_minions_merge_fun_compound_not_affected(ck_with_resources): + """ + The merge-mode skip only applies to wildcard globs. A compound + expression is never augmented regardless of fun. 
+ """ + with patch.object( + ck_with_resources, + "_check_compound_minions", + return_value={"minions": ["minion"], "missing": []}, + ): + result = ck_with_resources.check_minions( + "G@os:Debian", tgt_type="compound", fun="test.ping" + ) + assert "dummy-01" not in result["minions"] diff --git a/tests/pytests/unit/utils/test_resources.py b/tests/pytests/unit/utils/test_resources.py new file mode 100644 index 000000000000..afe6ec0a40d6 --- /dev/null +++ b/tests/pytests/unit/utils/test_resources.py @@ -0,0 +1,44 @@ +""" +Tests for salt.utils.resources (configurable resource pillar key). +""" + +import logging + +import pytest + +import salt.utils.resources + + +def test_resource_pillar_key_default(): + assert salt.utils.resources.resource_pillar_key({}) == "resources" + assert salt.utils.resources.resource_pillar_key({"resource_pillar_key": "x"}) == "x" + + +@pytest.mark.parametrize("bad", ("", None)) +def test_resource_pillar_key_empty_warns_and_defaults(bad, caplog): + caplog.set_level(logging.WARNING) + assert ( + salt.utils.resources.resource_pillar_key({"resource_pillar_key": bad}) + == "resources" + ) + assert "resource_pillar_key is empty" in caplog.text + + +def test_pillar_resources_tree_default_key(): + opts = {"pillar": {"resources": {"ssh": {}}}} + assert salt.utils.resources.pillar_resources_tree(opts) == {"ssh": {}} + + +def test_pillar_resources_tree_custom_key(): + opts = {"resource_pillar_key": "my_res", "pillar": {"my_res": {"a": 1}}} + assert salt.utils.resources.pillar_resources_tree(opts) == {"a": 1} + + +def test_pillar_resources_tree_missing_key_same_as_empty(): + opts = {"pillar": {}} + assert salt.utils.resources.pillar_resources_tree(opts) == {} + + +def test_pillar_resources_tree_wrong_type(): + opts = {"pillar": {"resources": "bad"}} + assert salt.utils.resources.pillar_resources_tree(opts) == {} diff --git a/tests/support/sshd_runtime.py b/tests/support/sshd_runtime.py new file mode 100644 index 000000000000..3feb5afac9c9 --- /dev/null 
+++ b/tests/support/sshd_runtime.py @@ -0,0 +1,99 @@ +""" +Helpers for test-suite ``sshd`` instances. + +Minimal container images (e.g. Ubuntu CI) often ship OpenSSH without creating the +runtime directory used for privilege separation (commonly ``/run/sshd``). If that +directory is missing, ``sshd`` exits immediately with: + + Missing privilege separation directory: /run/sshd + +which surfaces as :class:`~pytestshellutils.exceptions.FactoryNotStarted` in tests. +""" + +from __future__ import annotations + +import logging +import os +import pathlib +import shutil +import subprocess +import sys + +log = logging.getLogger(__name__) + + +def ensure_sshd_privilege_separation_directories( + sshd_config_file: str | os.PathLike[str] | None = None, +) -> None: + """ + Ensure privilege-separation (and similar) directories exist before starting ``sshd``. + + * Prefer directories reported by ``sshd -T`` for the test config (portable across + distros / OpenSSH builds). + * If none are found, on Linux only, create ``/run/sshd`` — the usual expectation + on Debian/Ubuntu-family images when ``/run`` is tmpfs and the package postinst + did not run (typical in CI containers). + + Creating an unused empty directory is harmless; skipping non-Linux fallbacks + avoids touching paths that do not apply to macOS or Windows SSH test runs. 
+ """ + if os.name == "nt": + return + + sshd = shutil.which("sshd") + if not sshd: + log.debug("sshd not in PATH; skipping privilege-separation directory setup") + return + + config_path: pathlib.Path | None = None + if sshd_config_file is not None: + p = pathlib.Path(sshd_config_file) + if p.is_file(): + config_path = p.resolve() + + cmd = [sshd, "-T"] + if config_path is not None: + cmd.extend(["-f", str(config_path)]) + + dirs: list[str] = [] + try: + proc = subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=30, + check=False, + ) + if proc.returncode == 0: + for line in proc.stdout.splitlines(): + line = line.strip() + if not line or line.startswith("#"): + continue + parts = line.split(None, 1) + if len(parts) != 2: + continue + key, val = parts[0].lower(), parts[1].strip() + if "privsep" not in key: + continue + if val in ("none", "yes", "no", "sandbox"): + continue + if val.startswith("/"): + dirs.append(val) + else: + log.debug( + "sshd -T failed (rc=%s): %s", + proc.returncode, + (proc.stderr or proc.stdout or "").strip()[:500], + ) + except OSError as exc: + log.debug("Could not query sshd -T: %s", exc) + + if not dirs and sys.platform.startswith("linux") and os.path.isdir("/run"): + dirs.append("/run/sshd") + + for d in dirs: + try: + os.makedirs(d, mode=0o755, exist_ok=True) + log.debug("Ensured sshd runtime directory exists: %s", d) + except OSError as exc: + log.debug("Could not create %s: %s", d, exc)