X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/caacfc031998dc73cd2f4c767e1a746b7783d379..a6b15a15c6edb39d17ce79d71ec7b0816d7dcc0f:/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py diff --git a/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py b/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py index 8674f168f7..a950210aa8 100644 --- a/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py +++ b/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py @@ -223,6 +223,7 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase): @RetryMixin._retry() def shutdown_node(self): self._logger.info("Starting shutdown") + arv_node = self._arvados_node() if not self._cloud.destroy_node(self.cloud_node): if self._cloud.broken(self.cloud_node): self._later.cancel_shutdown(self.NODE_BROKEN) @@ -231,7 +232,6 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase): # Force a retry. raise cloud_types.LibcloudError("destroy_node failed") self._logger.info("Shutdown success") - arv_node = self._arvados_node() if arv_node is None: self._finished(success_flag=True) else: @@ -339,17 +339,20 @@ class ComputeNodeMonitorActor(config.actor_class): self._last_log = msg self._logger.debug(msg, *args) - def in_state(self, *states): - # Return a boolean to say whether or not our Arvados node record is in - # one of the given states. If state information is not - # available--because this node has no Arvados record, the record is - # stale, or the record has no state information--return None. - if (self.arvados_node is None) or not timestamp_fresh( - arvados_node_mtime(self.arvados_node), self.node_stale_after): - return None + def get_state(self): + """Get node state, one of ['unpaired', 'busy', 'idle', 'down'].""" + + # If this node is not associated with an Arvados node, return 'unpaired'. 
+
+        if self.arvados_node is None:
+            return 'unpaired'
+
         state = self.arvados_node['crunch_worker_state']
-        if not state:
-            return None
+
+        # If state information is not available because it is missing or the
+        # record is stale, return 'down'.
+        if not state or not timestamp_fresh(arvados_node_mtime(self.arvados_node),
+                                            self.node_stale_after):
+            state = 'down'
 
         # There's a window between when a node pings for the first time and the
         # value of 'slurm_state' is synchronized by crunch-dispatch.  In this
@@ -368,11 +371,18 @@ class ComputeNodeMonitorActor(config.actor_class):
             if arvados_node_missing(self.arvados_node, self.node_stale_after):
                 state = 'down'
 
-        result = state in states
-        if state == 'idle':
-            result = result and not self.arvados_node['job_uuid']
+        # Turns out using 'job_uuid' this way is a bad idea.  The node record
+        # is assigned the job_uuid before the job is locked (which removes it
+        # from the queue) which means the job will be double-counted as both in
+        # the wishlist and also keeping a node busy.  The end result is
+        # excess nodes being booted.
+        #if state == 'idle' and self.arvados_node['job_uuid']:
+        #    state = 'busy'
 
-        return result
+        return state
+
+    def in_state(self, *states):
+        return self.get_state() in states
 
     def shutdown_eligible(self):
         """Determine if node is candidate for shut down. 
@@ -389,18 +399,10 @@ class ComputeNodeMonitorActor(config.actor_class): # boot_grace = ["boot wait", "boot exceeded"] # idle_grace = ["not idle", "idle wait", "idle exceeded"] - if self.arvados_node is None: - crunch_worker_state = 'unpaired' - elif not timestamp_fresh(arvados_node_mtime(self.arvados_node), self.node_stale_after): + if self.arvados_node and not timestamp_fresh(arvados_node_mtime(self.arvados_node), self.node_stale_after): return (False, "node state is stale") - elif self.in_state('down'): - crunch_worker_state = 'down' - elif self.in_state('idle'): - crunch_worker_state = 'idle' - elif self.in_state('busy'): - crunch_worker_state = 'busy' - else: - return (False, "node is paired but crunch_worker_state is '%s'" % self.arvados_node['crunch_worker_state']) + + crunch_worker_state = self.get_state() window = "open" if self._shutdowns.window_open() else "closed"