X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/6f31d0c9d4b6a0d3070e933a84cfd843722e81b1..3a2802166901386eba4c3bc9bf877a18346ba03c:/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py

diff --git a/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py b/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
index 27397e5d50..4d70436801 100644
--- a/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
+++ b/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
@@ -8,13 +8,21 @@ import time
 from . import \
     ComputeNodeSetupActor, ComputeNodeUpdateActor, ComputeNodeMonitorActor
 from . import ComputeNodeShutdownActor as ShutdownActorBase
+from .. import RetryMixin
 
 class ComputeNodeShutdownActor(ShutdownActorBase):
+    SLURM_END_STATES = frozenset(['down\n', 'down*\n',
+                                  'drain\n', 'drain*\n',
+                                  'fail\n', 'fail*\n'])
+    SLURM_DRAIN_STATES = frozenset(['drain\n', 'drng\n'])
+
     def on_start(self):
-        arv_node = self._monitor.arvados_node.get()
+        arv_node = self._arvados_node()
         if arv_node is None:
+            self._nodename = None
             return super(ComputeNodeShutdownActor, self).on_start()
         else:
+            self._set_logger()
             self._nodename = arv_node['hostname']
             self._logger.info("Draining SLURM node %s", self._nodename)
             self._later.issue_slurm_drain()
@@ -25,24 +33,41 @@ class ComputeNodeShutdownActor(ShutdownActorBase):
         cmd.extend(args)
         subprocess.check_output(cmd)
 
-    @ShutdownActorBase._retry((subprocess.CalledProcessError,))
-    def cancel_shutdown(self):
-        self._set_node_state('RESUME')
-        return super(ComputeNodeShutdownActor, self).cancel_shutdown()
+    def _get_slurm_state(self):
+        return subprocess.check_output(['sinfo', '--noheader', '-o', '%t', '-n', self._nodename])
+
+    # The following methods retry on OSError. This is intended to mitigate bug
+    # #6321 where fork() of node manager raises "OSError: [Errno 12] Cannot
+    # allocate memory" resulting in the untimely death of the shutdown actor
+    # and tends to result in node manager getting into a wedged state where it
+    # won't allocate new nodes or shut down gracefully. The underlying causes
+    # of the excessive memory usage that result in the "Cannot allocate memory"
+    # error are still being investigated.
+
+    @RetryMixin._retry((subprocess.CalledProcessError, OSError))
+    def cancel_shutdown(self, reason):
+        if self._nodename:
+            if self._get_slurm_state() in self.SLURM_DRAIN_STATES:
+                # Resume from "drng" or "drain"
+                self._set_node_state('RESUME')
+            else:
+                # Node is in a state such as 'idle' or 'alloc' so don't
+                # try to resume it because that will just raise an error.
+                pass
+        return super(ComputeNodeShutdownActor, self).cancel_shutdown(reason)
 
+    @RetryMixin._retry((subprocess.CalledProcessError, OSError))
     @ShutdownActorBase._stop_if_window_closed
-    @ShutdownActorBase._retry((subprocess.CalledProcessError,))
     def issue_slurm_drain(self):
         self._set_node_state('DRAIN', 'Reason=Node Manager shutdown')
         self._logger.info("Waiting for SLURM node %s to drain", self._nodename)
         self._later.await_slurm_drain()
 
+    @RetryMixin._retry((subprocess.CalledProcessError, OSError))
     @ShutdownActorBase._stop_if_window_closed
-    @ShutdownActorBase._retry((subprocess.CalledProcessError,))
     def await_slurm_drain(self):
-        output = subprocess.check_output(
-            ['sinfo', '--noheader', '-o', '%t', '-n', self._nodename])
-        if output == 'drain\n':
+        output = self._get_slurm_state()
+        if output in self.SLURM_END_STATES:
             self._later.shutdown_node()
         else:
             self._timer.schedule(time.time() + 10,
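
The hunks above replace the single 'drain\n' comparison with the SLURM_END_STATES and SLURM_DRAIN_STATES sets, funnel the sinfo call through _get_slurm_state(), and retry on OSError in addition to CalledProcessError. The sketch below is a minimal standalone illustration of the state-handling side of the change, not part of the patch: the node name, the helper names, and the scontrol invocation (the diff only shows the tail of _set_node_state) are assumptions for illustration; the sinfo command line is taken verbatim from the diff.

# Minimal sketch of the drain/resume state handling used in the patch above.
# Assumes sinfo/scontrol are on PATH; the helper names here are illustrative.
import subprocess

# sinfo prints the compact node state followed by a newline, which is why the
# class constants in the patch keep the trailing '\n'.
SLURM_END_STATES = frozenset(['down\n', 'down*\n',
                              'drain\n', 'drain*\n',
                              'fail\n', 'fail*\n'])
SLURM_DRAIN_STATES = frozenset(['drain\n', 'drng\n'])

def get_slurm_state(nodename):
    # '%t' is the compact state, e.g. 'idle', 'alloc', 'drng', 'drain'.
    # The original Python 2 code compares the raw str output; under Python 3,
    # check_output() returns bytes, hence the decode().
    return subprocess.check_output(
        ['sinfo', '--noheader', '-o', '%t', '-n', nodename]).decode()

def cancel_drain(nodename):
    # Only resume a node that is actually draining ('drain' or 'drng');
    # issuing RESUME against an 'idle' or 'alloc' node makes scontrol return
    # an error, which is what the guard in cancel_shutdown() avoids.
    if get_slurm_state(nodename) in SLURM_DRAIN_STATES:
        subprocess.check_output(
            ['scontrol', 'update', 'NodeName=' + nodename, 'State=RESUME'])

def node_has_drained(nodename):
    # The patch treats any terminal state as "done draining", not just
    # 'drain': 'down', 'fail', and their '*' (non-responding) variants too.
    return get_slurm_state(nodename) in SLURM_END_STATES

One further point visible in the diff: @RetryMixin._retry is now the outermost decorator on issue_slurm_drain and await_slurm_drain, so each retry re-enters through @ShutdownActorBase._stop_if_window_closed rather than the other way around, and the retried exception tuple now includes OSError for the reason given in the in-line comment.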