[arvados.git] / services / nodemanager / arvnodeman / computenode / dispatch / slurm.py
#!/usr/bin/env python

from __future__ import absolute_import, print_function

import subprocess
import time

from . import \
    ComputeNodeSetupActor, ComputeNodeUpdateActor, ComputeNodeMonitorActor
from . import ComputeNodeShutdownActor as ShutdownActorBase

class ComputeNodeShutdownActor(ShutdownActorBase):
    # State strings are compared against raw `sinfo --noheader -o %t` output,
    # which includes a trailing newline.
    SLURM_END_STATES = frozenset(['down\n', 'down*\n',
                                  'drain\n', 'drain*\n',
                                  'fail\n', 'fail*\n'])
    SLURM_DRAIN_STATES = frozenset(['drain\n', 'drng\n'])

    def on_start(self):
        arv_node = self._arvados_node()
        if arv_node is None:
            # No Arvados record means there is no hostname to drain in SLURM,
            # so fall back to the base class's default shutdown behavior.
            self._nodename = None
            return super(ComputeNodeShutdownActor, self).on_start()
        else:
            self._nodename = arv_node['hostname']
            self._logger.info("Draining SLURM node %s", self._nodename)
            self._later.issue_slurm_drain()

    def _set_node_state(self, state, *args):
        # Equivalent to `scontrol update NodeName=<hostname> State=<state> ...`.
        cmd = ['scontrol', 'update', 'NodeName=' + self._nodename,
               'State=' + state]
        cmd.extend(args)
        subprocess.check_output(cmd)

    def _get_slurm_state(self):
        # Returns the node's current state exactly as printed by sinfo,
        # trailing newline included.
        return subprocess.check_output(['sinfo', '--noheader', '-o', '%t', '-n', self._nodename])

    # The following methods retry on OSError.  This is intended to mitigate
    # bug #6321, where fork() in node manager raises "OSError: [Errno 12]
    # Cannot allocate memory", killing the shutdown actor prematurely and
    # tending to leave node manager in a wedged state where it won't
    # allocate new nodes or shut down gracefully.  The underlying causes of
    # the excessive memory usage behind the "Cannot allocate memory" error
    # are still being investigated.
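    #
    # For reference, a retry decorator of this general shape could look like
    # the commented sketch below.  This is only an illustrative assumption,
    # not the actual ShutdownActorBase._retry (which is defined in the
    # dispatch package); names such as retry_wait and the use of
    # functools.wraps are part of the sketch, and the real signature and
    # backoff behavior may differ.
    #
    #   def _retry(errors):
    #       def decorator(orig_func):
    #           @functools.wraps(orig_func)          # assumes functools import
    #           def wrapper(self):
    #               try:
    #                   orig_func(self)
    #               except errors as error:
    #                   self._logger.warning("Retrying after error: %s", error)
    #                   # retry_wait is a hypothetical per-actor delay setting.
    #                   self._timer.schedule(time.time() + self.retry_wait,
    #                                        getattr(self._later,
    #                                                orig_func.__name__))
    #           return wrapper
    #       return decorator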

    @ShutdownActorBase._retry((subprocess.CalledProcessError, OSError))
    def cancel_shutdown(self):
        if self._nodename:
            if self._get_slurm_state() in self.SLURM_DRAIN_STATES:
                # Resume from "drng" or "drain"
                self._set_node_state('RESUME')
            else:
                # Node is in a state such as 'idle' or 'alloc' so don't
                # try to resume it because that will just raise an error.
                pass
        return super(ComputeNodeShutdownActor, self).cancel_shutdown()

    @ShutdownActorBase._retry((subprocess.CalledProcessError, OSError))
    @ShutdownActorBase._stop_if_window_closed
    def issue_slurm_drain(self):
        self._set_node_state('DRAIN', 'Reason=Node Manager shutdown')
        self._logger.info("Waiting for SLURM node %s to drain", self._nodename)
        self._later.await_slurm_drain()

    @ShutdownActorBase._retry((subprocess.CalledProcessError, OSError))
    @ShutdownActorBase._stop_if_window_closed
    def await_slurm_drain(self):
        output = self._get_slurm_state()
        if output in self.SLURM_END_STATES:
            self._later.shutdown_node()
        else:
            # Not drained yet; poll sinfo again in 10 seconds.
            self._timer.schedule(time.time() + 10,
                                 self._later.await_slurm_drain)