X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/9c95a4c2dcd650627d524513e1e18596c8533ac0..242d3272391baeb95eb0a5e4e51627c2d54e7bc6:/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py

diff --git a/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py b/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
index bb397fa277..919b57f42c 100644
--- a/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
+++ b/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
@@ -34,34 +34,35 @@ class ComputeNodeShutdownActor(ShutdownActorBase):
     def _get_slurm_state(self):
         return subprocess.check_output(['sinfo', '--noheader', '-o', '%t', '-n', self._nodename])
 
-    @ShutdownActorBase._retry((subprocess.CalledProcessError,))
-    def cancel_shutdown(self):
+    # The following methods retry on OSError.  This is intended to mitigate bug
+    # #6321 where fork() of node manager raises "OSError: [Errno 12] Cannot
+    # allocate memory" resulting in the untimely death of the shutdown actor
+    # and tends to result in node manager getting into a wedged state where it
+    # won't allocate new nodes or shut down gracefully.  The underlying causes
+    # of the excessive memory usage that result in the "Cannot allocate memory"
+    # error are still being investigated.
+
+    @ShutdownActorBase._retry((subprocess.CalledProcessError, OSError))
+    def cancel_shutdown(self, reason):
         if self._nodename:
-            try:
+            if self._get_slurm_state() in self.SLURM_DRAIN_STATES:
+                # Resume from "drng" or "drain"
                 self._set_node_state('RESUME')
-            except subprocess.CalledProcessError:
-                slum_state = self._get_slurm_state()
-                if slum_state in self.SLURM_DRAIN_STATES:
-                    # We expect to be able to resume from "drain" or "drng"
-                    # So if scontrol exited non-zero, something actually failed, so
-                    # raise an exception to signal the retry to kick in.
-                    raise
-                else:
-                    # Assume scontrol exited non-zero because the node is already in
-                    # 'idle' or 'alloc' (so it never started draining)
-                    # we don't need to do anything else resume it.
-                    pass
-        return super(ComputeNodeShutdownActor, self).cancel_shutdown()
+            else:
+                # Node is in a state such as 'idle' or 'alloc' so don't
+                # try to resume it because that will just raise an error.
+                pass
+        return super(ComputeNodeShutdownActor, self).cancel_shutdown(reason)
 
+    @ShutdownActorBase._retry((subprocess.CalledProcessError, OSError))
     @ShutdownActorBase._stop_if_window_closed
-    @ShutdownActorBase._retry((subprocess.CalledProcessError,))
     def issue_slurm_drain(self):
         self._set_node_state('DRAIN', 'Reason=Node Manager shutdown')
         self._logger.info("Waiting for SLURM node %s to drain", self._nodename)
         self._later.await_slurm_drain()
 
+    @ShutdownActorBase._retry((subprocess.CalledProcessError, OSError))
     @ShutdownActorBase._stop_if_window_closed
-    @ShutdownActorBase._retry((subprocess.CalledProcessError,))
     def await_slurm_drain(self):
         output = self._get_slurm_state()
         if output in self.SLURM_END_STATES:
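
Editor's note: the ShutdownActorBase._retry decorator used above is defined in the node manager dispatch base classes and is not shown in this diff. As a rough illustration of what the new decorators rely on, the standalone sketch below retries a subprocess call when it raises subprocess.CalledProcessError or OSError. The retry_on and node_state names and the tries/delay values are assumptions made for this sketch, not the actual node manager API, which schedules retries through its actor framework rather than sleeping.

    # Illustrative sketch only, not the real ShutdownActorBase._retry.
    import functools
    import subprocess
    import time

    def retry_on(errors, tries=3, delay=1.0):
        """Re-run the wrapped call when it raises one of `errors`."""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                for attempt in range(tries):
                    try:
                        return func(*args, **kwargs)
                    except errors:
                        if attempt == tries - 1:
                            raise
                        time.sleep(delay)
            return wrapper
        return decorator

    @retry_on((subprocess.CalledProcessError, OSError))
    def node_state(nodename):
        # Same sinfo invocation as _get_slurm_state(); retried if sinfo exits
        # non-zero or fork() fails with "OSError: [Errno 12] Cannot allocate memory".
        return subprocess.check_output(
            ['sinfo', '--noheader', '-o', '%t', '-n', nodename])

This also makes the reordering of the decorators visible: placing _retry outermost means the retry wraps the window check as well, so a failed scontrol/sinfo call is retried instead of killing the shutdown actor.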