3 from __future__ import absolute_import, print_function
9 ComputeNodeSetupActor, ComputeNodeUpdateActor
10 from . import ComputeNodeShutdownActor as ShutdownActorBase
11 from . import ComputeNodeMonitorActor as MonitorActorBase
12 from .. import RetryMixin
# Mixin of SLURM helpers shared by the shutdown and monitor actors below.
# NOTE(review): this extract is missing interior lines (the tail of the
# SLURM_END_STATES literal and part of _set_node_state's command build),
# so only comments are added here; code is left exactly as found.
14 class SlurmMixin(object):
# sinfo's "%t" output keeps its trailing newline, so states are matched
# with the "\n" suffix included.
15     SLURM_END_STATES = frozenset(['down\n', 'down*\n',
16 'drain\n', 'drain*\n',
# States meaning a drain is requested or in progress; callers only issue
# RESUME when the node is in one of these.
18     SLURM_DRAIN_STATES = frozenset(['drain\n', 'drng\n'])
# Run `scontrol update NodeName=<node> State=<state> [extra args]`.
# check_output makes a non-zero scontrol exit raise CalledProcessError,
# which the @RetryMixin._retry decorators on the callers retry on.
20     def _set_node_state(self, nodename, state, *args):
21     cmd = ['scontrol', 'update', 'NodeName=' + nodename,
24         subprocess.check_output(cmd)
# Return the node's current SLURM state token (e.g. 'idle\n', 'drng\n')
# as reported by `sinfo -o %t` for this node.
26     def _get_slurm_state(self, nodename):
27         return subprocess.check_output(['sinfo', '--noheader', '-o', '%t', '-n', nodename])
# Shutdown actor that drains the node in SLURM before destroying it.
# NOTE(review): this extract is missing interior lines (the `def on_start`
# line, the guard around cancel_shutdown's state check, and the `else:`
# lines) -- comments only; code is left exactly as found.
30 class ComputeNodeShutdownActor(SlurmMixin, ShutdownActorBase):
# on_start fragment (its `def` line is not visible here): look up the
# Arvados node record; with no record, defer to the base class start-up,
# otherwise remember the hostname and kick off a SLURM drain.
32 arv_node = self._arvados_node()
35         return super(ComputeNodeShutdownActor, self).on_start()
38             self._nodename = arv_node['hostname']
39             self._logger.info("Draining SLURM node %s", self._nodename)
40             self._later.issue_slurm_drain()
# Before cancelling the shutdown, undo the drain (scontrol RESUME) if the
# node is in a drain state; retried on scontrol/sinfo failure.
42     @RetryMixin._retry((subprocess.CalledProcessError,))
43     def cancel_shutdown(self, reason):
45             if self._get_slurm_state(self._nodename) in self.SLURM_DRAIN_STATES:
46                 # Resume from "drng" or "drain"
47                 self._set_node_state(self._nodename, 'RESUME')
49                 # Node is in a state such as 'idle' or 'alloc' so don't
50                 # try to resume it because that will just raise an error.
52         return super(ComputeNodeShutdownActor, self).cancel_shutdown(reason)
# Ask SLURM to drain the node, then schedule polling for completion.
54     @RetryMixin._retry((subprocess.CalledProcessError,))
55     @ShutdownActorBase._stop_if_window_closed
56     def issue_slurm_drain(self):
57         self._set_node_state(self._nodename, 'DRAIN', 'Reason=Node Manager shutdown')
58         self._logger.info("Waiting for SLURM node %s to drain", self._nodename)
59         self._later.await_slurm_drain()
# Poll step: once the node reaches an end state, proceed with the real
# shutdown; otherwise re-check after 10 seconds via the timer actor.
61     @RetryMixin._retry((subprocess.CalledProcessError,))
62     @ShutdownActorBase._stop_if_window_closed
63     def await_slurm_drain(self):
64         output = self._get_slurm_state(self._nodename)
65         if output in self.SLURM_END_STATES:
66             self._later.shutdown_node()
68             self._timer.schedule(time.time() + 10,
69                                  self._later.await_slurm_drain)
# Monitor actor that also consults SLURM state when deciding whether a
# node may be shut down, and that can re-enable (RESUME) a drained node.
# NOTE(review): interior lines are missing from this extract (the body of
# the early-eligible branch, the `try:` line matched by the `except`
# below, and the first line of the logger call) -- comments only.
72 class ComputeNodeMonitorActor(SlurmMixin, MonitorActorBase):
# A node that SLURM reports down/failed is immediately eligible for
# shutdown; drain states are excluded here (see comment below), and in
# all other cases the base-class eligibility rules decide.
74     def shutdown_eligible(self):
75         if self.arvados_node is not None:
76             state = self._get_slurm_state(self.arvados_node['hostname'])
77             # Automatically eligible for shutdown if it's down or failed, but
78             # not drain to avoid a race condition with resume_node().
79             if ((state in self.SLURM_END_STATES) and
80                 (state not in self.SLURM_DRAIN_STATES)):
82         return super(ComputeNodeMonitorActor, self).shutdown_eligible()
# Best-effort: take the node back out of drain. Failures are caught and
# logged rather than raised (see the except clause below).
84     def resume_node(self):
86             if (self.arvados_node is not None and
87                 self._get_slurm_state(self.arvados_node['hostname']) in self.SLURM_DRAIN_STATES):
88                 # Resume from "drng" or "drain"
89                 self._set_node_state(self.arvados_node['hostname'], 'RESUME')
90         except Exception as error:
92                 "Exception reenabling node: %s", error, exc_info=error)