from . import \
ComputeNodeSetupActor, ComputeNodeUpdateActor, ComputeNodeMonitorActor
from . import ComputeNodeShutdownActor as ShutdownActorBase
+from .. import RetryMixin
class ComputeNodeShutdownActor(ShutdownActorBase):
SLURM_END_STATES = frozenset(['down\n', 'down*\n',
self._nodename = None
return super(ComputeNodeShutdownActor, self).on_start()
else:
+ self._set_logger()
self._nodename = arv_node['hostname']
self._logger.info("Draining SLURM node %s", self._nodename)
self._later.issue_slurm_drain()
def _get_slurm_state(self):
return subprocess.check_output(['sinfo', '--noheader', '-o', '%t', '-n', self._nodename])
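# For reference, sinfo with "-o %t" prints the node's short state code and
# subprocess.check_output keeps the trailing newline, which is why the entries
# in SLURM_END_STATES above end with '\n'.  An illustrative call (the hostname
# 'compute0' is made up) would look like:
#   subprocess.check_output(
#       ['sinfo', '--noheader', '-o', '%t', '-n', 'compute0'])  # -> 'drng\n'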
- @ShutdownActorBase._retry((subprocess.CalledProcessError,))
- def cancel_shutdown(self):
+ # The following methods retry on OSError. This is intended to mitigate
+ # bug #6321, where fork() in node manager raises "OSError: [Errno 12]
+ # Cannot allocate memory". That error kills the shutdown actor prematurely
+ # and tends to leave node manager in a wedged state where it will neither
+ # allocate new nodes nor shut down gracefully. The underlying causes of
+ # the excessive memory usage behind the "Cannot allocate memory" error are
+ # still being investigated.
+
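# For context, RetryMixin._retry (used below) is applied as a decorator
# factory: it takes a tuple of exception types and returns a decorator that
# re-runs the wrapped method when one of those exceptions is raised.  The
# generic sketch below illustrates that shape only; the names retry_on, wait,
# and attempts are assumptions, and the real RetryMixin reschedules retries
# through the actor machinery rather than sleeping inline.
import functools
import time

def retry_on(errors, wait=1, attempts=3):
    def decorator(orig_func):
        @functools.wraps(orig_func)
        def wrapper(*args, **kwargs):
            for attempt in range(attempts):
                try:
                    return orig_func(*args, **kwargs)
                except errors:
                    if attempt + 1 == attempts:
                        raise
                    time.sleep(wait)  # brief pause before retrying
        return wrapper
    return decorator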
+ @RetryMixin._retry((subprocess.CalledProcessError, OSError))
+ def cancel_shutdown(self, reason):
if self._nodename:
- try:
+ if self._get_slurm_state() in self.SLURM_DRAIN_STATES:
+ # Resume from "drng" or "drain"
self._set_node_state('RESUME')
- except subprocess.CalledProcessError:
- slum_state = self._get_slurm_state()
- if slum_state in self.SLURM_DRAIN_STATES:
- # We expect to be able to resume from "drain" or "drng"
- # So if scontrol exited non-zero, something actually failed, so
- # raise an exception to signal the retry to kick in.
- raise
- else:
- # Assume scontrol exited non-zero because the node is already in
- # 'idle' or 'alloc' (so it never started draining)
- # we don't need to do anything else resume it.
- pass
- return super(ComputeNodeShutdownActor, self).cancel_shutdown()
+ else:
+ # Node is in a state such as 'idle' or 'alloc' so don't
+ # try to resume it because that will just raise an error.
+ pass
+ return super(ComputeNodeShutdownActor, self).cancel_shutdown(reason)
+ @RetryMixin._retry((subprocess.CalledProcessError, OSError))
@ShutdownActorBase._stop_if_window_closed
- @ShutdownActorBase._retry((subprocess.CalledProcessError,))
def issue_slurm_drain(self):
self._set_node_state('DRAIN', 'Reason=Node Manager shutdown')
self._logger.info("Waiting for SLURM node %s to drain", self._nodename)
self._later.await_slurm_drain()
+ @RetryMixin._retry((subprocess.CalledProcessError, OSError))
@ShutdownActorBase._stop_if_window_closed
- @ShutdownActorBase._retry((subprocess.CalledProcessError,))
def await_slurm_drain(self):
output = self._get_slurm_state()
if output in self.SLURM_END_STATES: