Merge branch '7667-node-manager-logging' refs #7667
diff --git a/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py b/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
index ec5014e9f9cf1e8848353cf3c755e22875227850..4d70436801564e9a35675e95c18f33fddc125806 100644
--- a/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
+++ b/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
@@ -8,6 +8,7 @@ import time
 from . import \
     ComputeNodeSetupActor, ComputeNodeUpdateActor, ComputeNodeMonitorActor
 from . import ComputeNodeShutdownActor as ShutdownActorBase
+from .. import RetryMixin
 
 class ComputeNodeShutdownActor(ShutdownActorBase):
     SLURM_END_STATES = frozenset(['down\n', 'down*\n',
@@ -21,6 +22,7 @@ class ComputeNodeShutdownActor(ShutdownActorBase):
             self._nodename = None
             return super(ComputeNodeShutdownActor, self).on_start()
         else:
+            self._set_logger()
             self._nodename = arv_node['hostname']
             self._logger.info("Draining SLURM node %s", self._nodename)
             self._later.issue_slurm_drain()
@@ -42,8 +44,8 @@ class ComputeNodeShutdownActor(ShutdownActorBase):
     # of the excessive memory usage that result in the "Cannot allocate memory"
     # error are still being investigated.
 
-    @ShutdownActorBase._retry((subprocess.CalledProcessError, OSError))
-    def cancel_shutdown(self):
+    @RetryMixin._retry((subprocess.CalledProcessError, OSError))
+    def cancel_shutdown(self, reason):
         if self._nodename:
             if self._get_slurm_state() in self.SLURM_DRAIN_STATES:
                 # Resume from "drng" or "drain"
@@ -52,16 +54,16 @@ class ComputeNodeShutdownActor(ShutdownActorBase):
                 # Node is in a state such as 'idle' or 'alloc' so don't
                 # try to resume it because that will just raise an error.
                 pass
-        return super(ComputeNodeShutdownActor, self).cancel_shutdown()
+        return super(ComputeNodeShutdownActor, self).cancel_shutdown(reason)
 
-    @ShutdownActorBase._retry((subprocess.CalledProcessError, OSError))
+    @RetryMixin._retry((subprocess.CalledProcessError, OSError))
     @ShutdownActorBase._stop_if_window_closed
     def issue_slurm_drain(self):
         self._set_node_state('DRAIN', 'Reason=Node Manager shutdown')
         self._logger.info("Waiting for SLURM node %s to drain", self._nodename)
         self._later.await_slurm_drain()
 
-    @ShutdownActorBase._retry((subprocess.CalledProcessError, OSError))
+    @RetryMixin._retry((subprocess.CalledProcessError, OSError))
     @ShutdownActorBase._stop_if_window_closed
     def await_slurm_drain(self):
         output = self._get_slurm_state()
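
For context on the hunks above: the `_retry` decorator now comes from `RetryMixin` (imported via `from .. import RetryMixin`) instead of being looked up on `ShutdownActorBase`, presumably so the same retry logic can be shared by other actor classes. The sketch below is a hypothetical, simplified stand-in showing the general shape of such a decorator: re-invoke the wrapped method whenever one of the listed exception types is raised, with capped exponential backoff. The class and attribute names here (`RetryMixinSketch`, `min_retry_wait`, `max_retry_wait`) and the inline `time.sleep` are illustrative assumptions; the real arvnodeman implementation reschedules the call through the actor machinery rather than blocking.

# Hypothetical sketch only, not the actual RetryMixin from
# arvnodeman/computenode/__init__.py; it just demonstrates the
# retry-on-listed-exceptions pattern the diff relies on.
import functools
import time

class RetryMixinSketch(object):
    min_retry_wait = 1   # seconds before the first retry (assumed value)
    max_retry_wait = 8   # backoff cap (assumed value)

    @staticmethod
    def _retry(errors=()):
        # Build a decorator that retries the wrapped method whenever it
        # raises one of the exception types in `errors`, doubling the wait
        # between attempts up to max_retry_wait. Any other exception
        # propagates unchanged.
        def decorator(orig_func):
            @functools.wraps(orig_func)
            def wrapper(self, *args, **kwargs):
                wait = self.min_retry_wait
                while True:
                    try:
                        return orig_func(self, *args, **kwargs)
                    except errors:
                        time.sleep(wait)
                        wait = min(wait * 2, self.max_retry_wait)
            return wrapper
        return decorator

Under this shape, decorating a method with `@RetryMixin._retry((subprocess.CalledProcessError, OSError))`, as the patched `cancel_shutdown`, `issue_slurm_drain`, and `await_slurm_drain` do, retries transient `scontrol`/`sinfo` failures such as the "Cannot allocate memory" errors mentioned in the comment above, instead of letting a single failed subprocess call abort the shutdown state machine.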