9303: Fetch arv_node before trying to shut down node, because monitor actor may
[arvados.git] / services / nodemanager / arvnodeman / computenode / dispatch / __init__.py
index a2d24b3d3b93e616563b9264ab8c9b85f843fb59..a950210aa89c0bbf18e9df64815393bfae71c65a 100644 (file)
@@ -223,6 +223,7 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
     @RetryMixin._retry()
     def shutdown_node(self):
         self._logger.info("Starting shutdown")
+        arv_node = self._arvados_node()
         if not self._cloud.destroy_node(self.cloud_node):
             if self._cloud.broken(self.cloud_node):
                 self._later.cancel_shutdown(self.NODE_BROKEN)
@@ -231,7 +232,6 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
                 # Force a retry.
                 raise cloud_types.LibcloudError("destroy_node failed")
         self._logger.info("Shutdown success")
-        arv_node = self._arvados_node()
         if arv_node is None:
             self._finished(success_flag=True)
         else:
@@ -339,65 +339,105 @@ class ComputeNodeMonitorActor(config.actor_class):
         self._last_log = msg
         self._logger.debug(msg, *args)
 
-    def in_state(self, *states):
-        # Return a boolean to say whether or not our Arvados node record is in
-        # one of the given states.  If state information is not
-        # available--because this node has no Arvados record, the record is
-        # stale, or the record has no state information--return None.
-        if (self.arvados_node is None) or not timestamp_fresh(
-              arvados_node_mtime(self.arvados_node), self.node_stale_after):
-            return None
+    def get_state(self):
+        """Get node state, one of ['unpaired', 'busy', 'idle', 'down']."""
+
+        # If this node is not associated with an Arvados node, return 'unpaired'.
+        if self.arvados_node is None:
+            return 'unpaired'
+
         state = self.arvados_node['crunch_worker_state']
-        if not state:
-            return None
-        result = state in states
-        if state == 'idle':
-            result = result and not self.arvados_node['job_uuid']
-        return result
 
-    def consider_shutdown(self):
-        try:
-            # Collect states and then consult state transition table
-            # whether we should shut down.  Possible states are:
-            #
-            # crunch_worker_state = ['unpaired', 'busy', 'idle', 'down']
-            # window = ["open", "closed"]
-            # boot_grace = ["boot wait", "boot exceeded"]
-            # idle_grace = ["not idle", "idle wait", "idle exceeded"]
-
-            if self.arvados_node is None:
-                crunch_worker_state = 'unpaired'
-            elif self.arvados_node['crunch_worker_state']:
-                crunch_worker_state = self.arvados_node['crunch_worker_state']
-            else:
-                self._debug("Node is paired but crunch_worker_state is null")
-                return
+        # If state information is not available because it is missing or the
+        # record is stale, treat the node as 'down'.
+        if not state or not timestamp_fresh(arvados_node_mtime(self.arvados_node),
+                                            self.node_stale_after):
+            state = 'down'
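+        # (timestamp_fresh(ts, limit) is assumed to be true roughly while
+        # time.time() - ts < limit, i.e. while the record is still recent.)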
+
+        # There's a window between when a node pings for the first time and
+        # when crunch-dispatch synchronizes the value of 'slurm_state'.  In
+        # this window the node will still report as 'down'.  If the node has
+        # pinged at least once (first_ping_at is set), consider it 'idle'
+        # through the initial boot grace period.
+        if (state == 'down' and
+            self.arvados_node['first_ping_at'] and
+            timestamp_fresh(self.cloud_node_start_time,
+                            self.boot_fail_after) and
+            not self._cloud.broken(self.cloud_node)):
+            state = 'idle'
+
+        # "missing" means last_ping_at is stale, this should be
+        # considered "down"
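+        # (arvados_node_missing is assumed to check last_ping_at freshness
+        # against node_stale_after, analogous to the mtime check above.)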
+        if arvados_node_missing(self.arvados_node, self.node_stale_after):
+            state = 'down'
+
+        # Turns out using 'job_uuid' this way is a bad idea.  The node record
+        # is assigned the job_uuid before the job is locked (which removes it
+        # from the queue), so the job gets double-counted: it still appears in
+        # the wishlist while also keeping a node busy.  The end result is
+        # excess nodes being booted.
+        #if state == 'idle' and self.arvados_node['job_uuid']:
+        #    state = 'busy'
+
+        return state
 
-            window = "open" if self._shutdowns.window_open() else "closed"
+    def in_state(self, *states):
+        """Return True if the node's current state is one of `states`."""
+        return self.get_state() in states
 
-            if timestamp_fresh(self.cloud_node_start_time, self.boot_fail_after):
-                boot_grace = "boot wait"
-            else:
-                boot_grace = "boot exceeded"
+    def shutdown_eligible(self):
+        """Determine if node is candidate for shut down.
 
-            # API server side not implemented yet.
-            idle_grace = 'idle exceeded'
+        Returns a tuple of (boolean, string): whether the node is a candidate
+        for shutdown, and the reason for the decision.
+        """
+
+        # Collect states and then consult the state transition table to decide
+        # whether we should shut down.  Possible states are:
+        # crunch_worker_state = ['unpaired', 'busy', 'idle', 'down']
+        # window = ["open", "closed"]
+        # boot_grace = ["boot wait", "boot exceeded"]
+        # idle_grace = ["not idle", "idle wait", "idle exceeded"]
+
+        if self.arvados_node and not timestamp_fresh(
+                arvados_node_mtime(self.arvados_node), self.node_stale_after):
+            return (False, "node state is stale")
+
+        crunch_worker_state = self.get_state()
+
+        window = "open" if self._shutdowns.window_open() else "closed"
+
+        if timestamp_fresh(self.cloud_node_start_time, self.boot_fail_after):
+            boot_grace = "boot wait"
+        else:
+            boot_grace = "boot exceeded"
+
+        # Not implemented on the API server side yet, so the idle grace
+        # period is always treated as exceeded.
+        idle_grace = 'idle exceeded'
 
-            node_state = (crunch_worker_state, window, boot_grace, idle_grace)
-            self._debug("Considering shutdown, node state is %s", node_state)
-            eligible = transitions[node_state]
+        node_state = (crunch_worker_state, window, boot_grace, idle_grace)
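+        # (Illustrative assumption: `transitions` is a dict keyed on these
+        # (crunch_worker_state, window, boot_grace, idle_grace) 4-tuples; a
+        # non-None value marks the combination as shutdown-eligible, e.g. an
+        # idle node with the window open, while combinations such as busy
+        # nodes are expected to map to None.)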
+        if transitions[node_state] is not None:
+            # Yes: eligible for shutdown.
+            return (True, "node state is %s" % (node_state,))
+        else:
+            # No: report the state as the reason.
+            return (False, "node state is %s" % (node_state,))
 
+    def consider_shutdown(self):
+        try:
+            eligible, reason = self.shutdown_eligible()
             next_opening = self._shutdowns.next_opening()
             if eligible:
-                self._debug("Suggesting shutdown.")
+                self._debug("Suggesting shutdown because %s", reason)
                 _notify_subscribers(self.actor_ref.proxy(), self.subscribers)
-            elif self.last_shutdown_opening != next_opening:
-                self._debug("Shutdown window closed.  Next at %s.",
-                            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(next_opening)))
-                self._timer.schedule(next_opening, self._later.consider_shutdown)
-                self.last_shutdown_opening = next_opening
             else:
-              self._debug("Won't shut down")
+                self._debug("Not eligible for shut down because %s", reason)
+
+                if self.last_shutdown_opening != next_opening:
+                    self._debug("Shutdown window closed.  Next at %s.",
+                                time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(next_opening)))
+                    self._timer.schedule(next_opening, self._later.consider_shutdown)
+                    self.last_shutdown_opening = next_opening
         except Exception:
             self._logger.exception("Unexpected exception")