5352: Set node size on records returned from libcloud create_node
[arvados.git] services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
index 225d856ba740cd207996d7a053e9169300b57561..919b57f42c8973bab91de742d1fee48598296f35 100644
@@ -13,6 +13,7 @@ class ComputeNodeShutdownActor(ShutdownActorBase):
     SLURM_END_STATES = frozenset(['down\n', 'down*\n',
                                   'drain\n', 'drain*\n',
                                   'fail\n', 'fail*\n'])
+    SLURM_DRAIN_STATES = frozenset(['drain\n', 'drng\n'])
 
     def on_start(self):
         arv_node = self._arvados_node()
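The state strings above keep their trailing newlines deliberately: "sinfo --noheader -o %t -n <node>" prints the node state followed by a newline, and _get_slurm_state() returns that output verbatim, so membership tests against the frozensets compare the raw string. A minimal sketch of the pattern, assuming Python 2 (node manager's runtime at the time, where check_output() returns str); the standalone helper and the node name "compute0" are hypothetical, not part of this commit:

    import subprocess

    SLURM_DRAIN_STATES = frozenset(['drain\n', 'drng\n'])

    def get_slurm_state(nodename):
        # sinfo prints the bare state token plus a trailing newline,
        # e.g. 'drng\n' while the node is draining.
        return subprocess.check_output(
            ['sinfo', '--noheader', '-o', '%t', '-n', nodename])

    if get_slurm_state('compute0') in SLURM_DRAIN_STATES:
        # Only a draining node should be resumed; an 'idle' or 'alloc'
        # node would make the RESUME update fail.
        print('node is draining; safe to cancel shutdown with RESUME')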
@@ -30,24 +31,40 @@ class ComputeNodeShutdownActor(ShutdownActorBase):
         cmd.extend(args)
         subprocess.check_output(cmd)
 
-    @ShutdownActorBase._retry((subprocess.CalledProcessError,))
-    def cancel_shutdown(self):
+    def _get_slurm_state(self):
+        return subprocess.check_output(['sinfo', '--noheader', '-o', '%t', '-n', self._nodename])
+
+    # The following methods retry on OSError.  This is intended to mitigate
+    # bug #6321, in which fork() in node manager raises "OSError: [Errno 12]
+    # Cannot allocate memory".  That error kills the shutdown actor and tends
+    # to leave node manager in a wedged state where it will neither allocate
+    # new nodes nor shut down gracefully.  The underlying causes of the
+    # excessive memory usage behind the "Cannot allocate memory" error are
+    # still being investigated.
+
+    @ShutdownActorBase._retry((subprocess.CalledProcessError, OSError))
+    def cancel_shutdown(self, reason):
         if self._nodename:
-            self._set_node_state('RESUME')
-        return super(ComputeNodeShutdownActor, self).cancel_shutdown()
+            if self._get_slurm_state() in self.SLURM_DRAIN_STATES:
+                # Resume from "drng" or "drain"
+                self._set_node_state('RESUME')
+            else:
+                # Node is in a state such as 'idle' or 'alloc', so don't
+                # try to resume it; that would just raise an error.
+                pass
+        return super(ComputeNodeShutdownActor, self).cancel_shutdown(reason)
 
+    @ShutdownActorBase._retry((subprocess.CalledProcessError, OSError))
     @ShutdownActorBase._stop_if_window_closed
-    @ShutdownActorBase._retry((subprocess.CalledProcessError,))
     def issue_slurm_drain(self):
         self._set_node_state('DRAIN', 'Reason=Node Manager shutdown')
         self._logger.info("Waiting for SLURM node %s to drain", self._nodename)
         self._later.await_slurm_drain()
 
+    @ShutdownActorBase._retry((subprocess.CalledProcessError, OSError))
     @ShutdownActorBase._stop_if_window_closed
-    @ShutdownActorBase._retry((subprocess.CalledProcessError,))
     def await_slurm_drain(self):
-        output = subprocess.check_output(
-            ['sinfo', '--noheader', '-o', '%t', '-n', self._nodename])
+        output = self._get_slurm_state()
         if output in self.SLURM_END_STATES:
             self._later.shutdown_node()
         else:
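The hunks above also swap the decorator order so that _retry is outermost. Python applies stacked decorators bottom-up, so the topmost one wraps everything beneath it: with _retry on top, an OSError raised inside the _stop_if_window_closed check, or in the subprocess call it guards, is caught and retried instead of killing the actor. A simplified, self-contained illustration of that ordering (retry(), stop_if_window_closed(), and window_open() here are toy stand-ins, not the arvnodeman implementations):

    import functools
    import time

    def retry(errors, tries=3, delay=0.1):
        def decorator(orig_func):
            @functools.wraps(orig_func)
            def wrapper(*args, **kwargs):
                for attempt in range(tries):
                    try:
                        return orig_func(*args, **kwargs)
                    except errors:
                        if attempt == tries - 1:
                            raise
                        time.sleep(delay)
            return wrapper
        return decorator

    def window_open():
        # Stand-in for the shutdown-window check; in node manager this
        # could itself raise OSError if fork() fails (bug #6321).
        return True

    def stop_if_window_closed(orig_func):
        @functools.wraps(orig_func)
        def wrapper(*args, **kwargs):
            if not window_open():
                return None
            return orig_func(*args, **kwargs)
        return wrapper

    @retry((OSError,))        # outermost: sees errors from both layers
    @stop_if_window_closed    # innermost wrapper around the body
    def issue_drain():
        pass

    issue_drain()

With the pre-commit order (retry innermost), an OSError raised by the window check would have escaped unretried.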