6321: Add test that OSError is caught from slurm subprocess invocations.
[arvados.git] services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
index bb397fa277d43d42e5b066d09198972c21e3371e..b4ed088e2169adbb1496f160539bf50194057e2d 100644
@@ -34,34 +34,27 @@ class ComputeNodeShutdownActor(ShutdownActorBase):
     def _get_slurm_state(self):
         return subprocess.check_output(['sinfo', '--noheader', '-o', '%t', '-n', self._nodename])
 
-    @ShutdownActorBase._retry((subprocess.CalledProcessError,))
+    @ShutdownActorBase._retry((subprocess.CalledProcessError, OSError))
     def cancel_shutdown(self):
         if self._nodename:
-            try:
+            if self._get_slurm_state() in self.SLURM_DRAIN_STATES:
+                # Resume from "drng" or "drain"
                 self._set_node_state('RESUME')
-            except subprocess.CalledProcessError:
-                slum_state = self._get_slurm_state()
-                if slum_state in self.SLURM_DRAIN_STATES:
-                    # We expect to be able to resume from "drain" or "drng"
-                    # So if scontrol exited non-zero, something actually failed, so
-                    # raise an exception to signal the retry to kick in.
-                    raise
-                else:
-                    # Assume scontrol exited non-zero because the node is already in
-                    # 'idle' or 'alloc' (so it never started draining)
-                    # we don't need to do anything else resume it.
-                    pass
+            else:
+                # Node is in a state such as 'idle' or 'alloc' so don't
+                # try to resume it because that will just raise an error.
+                pass
         return super(ComputeNodeShutdownActor, self).cancel_shutdown()
 
+    @ShutdownActorBase._retry((subprocess.CalledProcessError, OSError))
     @ShutdownActorBase._stop_if_window_closed
-    @ShutdownActorBase._retry((subprocess.CalledProcessError,))
     def issue_slurm_drain(self):
         self._set_node_state('DRAIN', 'Reason=Node Manager shutdown')
         self._logger.info("Waiting for SLURM node %s to drain", self._nodename)
         self._later.await_slurm_drain()
 
+    @ShutdownActorBase._retry((subprocess.CalledProcessError, OSError))
     @ShutdownActorBase._stop_if_window_closed
-    @ShutdownActorBase._retry((subprocess.CalledProcessError,))
     def await_slurm_drain(self):
         output = self._get_slurm_state()
         if output in self.SLURM_END_STATES:
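
The test referenced in the commit message ("Add test that OSError is caught from slurm subprocess invocations") is not part of this hunk, which only widens the retry tuples in slurm.py. As a rough illustration of what such a test has to prove, here is a minimal, self-contained sketch. It is not the Arvados test suite: retry_on and FakeShutdownActor are hypothetical stand-ins for ShutdownActorBase._retry and the real shutdown actor. The underlying fact it exercises is that when the slurm binaries cannot be executed at all (e.g. scontrol or sinfo missing from PATH), subprocess.check_output raises OSError rather than CalledProcessError, which is why OSError now has to be in the retry tuple.

    import subprocess
    import unittest

    try:
        from unittest import mock   # Python 3
    except ImportError:
        import mock                 # Python 2, the node manager's era


    def retry_on(errors):
        """Hypothetical stand-in for ShutdownActorBase._retry: instead of
        rescheduling the actor method, just record that a retry would happen."""
        def decorator(orig_func):
            def wrapper(self, *args, **kwargs):
                try:
                    return orig_func(self, *args, **kwargs)
                except errors:
                    self.retried = True
            return wrapper
        return decorator


    class FakeShutdownActor(object):
        """Hypothetical minimal actor: just the slurm call and a retry flag."""
        retried = False

        @retry_on((subprocess.CalledProcessError, OSError))
        def issue_slurm_drain(self):
            subprocess.check_output(
                ['scontrol', 'update', 'NodeName=compute0',
                 'State=DRAIN', 'Reason=Node Manager shutdown'])


    class RetryCatchesOSErrorTestCase(unittest.TestCase):
        def test_oserror_from_missing_slurm_binary_is_retried(self):
            actor = FakeShutdownActor()
            # Simulate scontrol being absent: check_output raises OSError,
            # not CalledProcessError, so OSError must be in the retry tuple.
            with mock.patch('subprocess.check_output',
                            side_effect=OSError(2, 'No such file or directory')):
                actor.issue_slurm_drain()   # must not propagate the OSError
            self.assertTrue(actor.retried)


    if __name__ == '__main__':
        unittest.main()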