@RetryMixin._retry()
def shutdown_node(self):
self._logger.info("Starting shutdown")
+ arv_node = self._arvados_node()
if not self._cloud.destroy_node(self.cloud_node):
if self._cloud.broken(self.cloud_node):
self._later.cancel_shutdown(self.NODE_BROKEN)
return
# Force a retry: @RetryMixin._retry() catches the raised cloud error and
# reschedules shutdown_node.
raise cloud_types.LibcloudError("destroy_node failed")
self._logger.info("Shutdown success")
- arv_node = self._arvados_node()
if arv_node is None:
self._finished(success_flag=True)
else:
    self._later.clean_arvados_node(arv_node)

def _debug(self, msg, *args):
    # Suppress consecutive duplicate log messages.
    if msg == self._last_log:
        return
    self._last_log = msg
    self._logger.debug(msg, *args)
- def in_state(self, *states):
- # Return a boolean to say whether or not our Arvados node record is in
- # one of the given states. If state information is not
- # available--because this node has no Arvados record, the record is
- # stale, or the record has no state information--return None.
- if (self.arvados_node is None) or not timestamp_fresh(
- arvados_node_mtime(self.arvados_node), self.node_stale_after):
- return None
+ def get_state(self):
+ """Get node state, one of ['unpaired', 'busy', 'idle', 'down']."""
+
+ # If this node is not associated with an Arvados node, return 'unpaired'.
+ if self.arvados_node is None:
+ return 'unpaired'
+
state = self.arvados_node['crunch_worker_state']
- if not state:
- return None
- result = state in states
- if state == 'idle':
- result = result and not self.arvados_node['job_uuid']
- return result
+
+ # If state information is not available because it is missing or the
+ # record is stale, return 'down'.
+ if not state or not timestamp_fresh(arvados_node_mtime(self.arvados_node),
+ self.node_stale_after):
+ state = 'down'
+
+ # There's a window between a node's first ping and the moment
+ # crunch-dispatch synchronizes the value of 'slurm_state'. In this
+ # window, the node will still report as 'down'. Check that
+ # first_ping_at is truthy and consider the node 'idle' during the
+ # initial boot grace period.
+ if (state == 'down' and
+ self.arvados_node['first_ping_at'] and
+ timestamp_fresh(self.cloud_node_start_time,
+ self.boot_fail_after) and
+ not self._cloud.broken(self.cloud_node)):
+ state = 'idle'
+
+ # "missing" means last_ping_at is stale, this should be
+ # considered "down"
+ if arvados_node_missing(self.arvados_node, self.node_stale_after):
+ state = 'down'
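+
+ # Note the ordering: this check runs after the boot grace check above,
+ # so a node that has pinged but whose last_ping_at has since gone stale
+ # reports 'down' even inside the boot grace period.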
+
+ # Turns out using 'job_uuid' this way is a bad idea. The node record
+ # is assigned the job_uuid before the job is locked (which removes it
+ # from the queue), which means the job is double-counted: it is still
+ # in the wishlist but is also keeping a node busy. The end result is
+ # excess nodes being booted.
+ #if state == 'idle' and self.arvados_node['job_uuid']:
+ # state = 'busy'
+
+ return state
+
+ def in_state(self, *states):
+ return self.get_state() in states
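+
+ # Illustrative sketch only ('monitor' is an assumed name for an instance
+ # of this class whose record is paired, fresh, and reports 'idle'):
+ #
+ #   monitor.get_state()               # -> 'idle'
+ #   monitor.in_state('idle')          # -> True
+ #   monitor.in_state('busy', 'down')  # -> False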
def shutdown_eligible(self):
"""Determine if node is candidate for shut down.
# boot_grace = ["boot wait", "boot exceeded"]
# idle_grace = ["not idle", "idle wait", "idle exceeded"]
- if self.arvados_node is None:
- crunch_worker_state = 'unpaired'
- elif not timestamp_fresh(arvados_node_mtime(self.arvados_node), self.node_stale_after):
+ if self.arvados_node and not timestamp_fresh(arvados_node_mtime(self.arvados_node), self.node_stale_after):
return (False, "node state is stale")
- elif self.arvados_node['crunch_worker_state']:
- crunch_worker_state = self.arvados_node['crunch_worker_state']
- else:
- return (False, "node is paired but crunch_worker_state is '%s'" % self.arvados_node['crunch_worker_state'])
+
+ crunch_worker_state = self.get_state()
window = "open" if self._shutdowns.window_open() else "closed"