X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/55b017847fe87e00e7fb9c6bfd0444b83f8ca12c..0eb72b526bf8bbb011551ecf019f604e17a534f1:/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py

diff --git a/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py b/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
index 552ed01b72..fb9a6bf214 100644
--- a/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
+++ b/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
@@ -1,12 +1,18 @@
 #!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
 
 from __future__ import absolute_import, print_function
 
 import functools
 import logging
 import time
+import re
 
 import libcloud.common.types as cloud_types
+from libcloud.common.exceptions import BaseHTTPError
+
 import pykka
 
 from .. import \
@@ -14,6 +20,9 @@ from .. import \
     arvados_node_missing, RetryMixin
 from ...clientactor import _notify_subscribers
 from ... import config
+from .transitions import transitions
+
+QuotaExceeded = "QuotaExceeded"
 
 class ComputeNodeStateChangeBase(config.actor_class, RetryMixin):
     """Base class for actors that change a compute node's state.
@@ -95,6 +104,7 @@ class ComputeNodeSetupActor(ComputeNodeStateChangeBase):
         self.cloud_size = cloud_size
         self.arvados_node = None
         self.cloud_node = None
+        self.error = None
         if arvados_node is None:
             self._later.create_arvados_node()
         else:
@@ -118,10 +128,34 @@ class ComputeNodeSetupActor(ComputeNodeStateChangeBase):
     def create_cloud_node(self):
         self._logger.info("Sending create_node request for node size %s.",
                           self.cloud_size.name)
-        self.cloud_node = self._cloud.create_node(self.cloud_size,
-                                                  self.arvados_node)
-        if not self.cloud_node.size:
-            self.cloud_node.size = self.cloud_size
+        try:
+            self.cloud_node = self._cloud.create_node(self.cloud_size,
+                                                      self.arvados_node)
+        except BaseHTTPError as e:
+            if e.code == 429 or "RequestLimitExceeded" in e.message:
+                # Don't consider API rate limits to be quota errors.
+                # Re-raise so the Retry logic applies.
+                raise
+
+            # The set of possible error codes / messages isn't documented for
+            # all clouds, so use a keyword heuristic to determine if the
+            # failure is likely due to a quota.
+            if re.search(r'(exceed|quota|limit)', e.message, re.I):
+                self.error = QuotaExceeded
+                self._logger.warning("Quota exceeded: %s", e)
+                self._finished()
+                return
+            else:
+                # Something else happened; re-raise so the Retry logic applies.
+                raise
+
+        # The information included in the node size object we get from
+        # libcloud is inconsistent between cloud drivers.  Replace the
+        # libcloud NodeSize object with a compatible CloudSizeWrapper object,
+        # which merges the size info reported by the cloud with size
+        # information from the configuration file.
+        self.cloud_node.size = self.cloud_size
+
         self._logger.info("Cloud node %s created.", self.cloud_node.id)
         self._later.update_arvados_node_properties()
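The keyword heuristic in create_cloud_node() is easy to sanity-check in isolation. Below is a minimal sketch of the same check as a standalone function; the function name and sample messages are illustrative only, not part of this patch:

    import re

    def looks_like_quota_error(message):
        # Same heuristic as create_cloud_node(): clouds don't report quota
        # failures with a uniform error code, so match common keywords.
        return re.search(r'(exceed|quota|limit)', message, re.I) is not None

    assert looks_like_quota_error("Quota 'CPUS' exceeded in region us-east1")
    assert looks_like_quota_error("InstanceLimitExceeded")
    assert not looks_like_quota_error("InternalError: please try again")

Note that rate-limit errors such as RequestLimitExceeded would also match this pattern, which is why the code above checks for them and re-raises before applying the heuristic.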
@@ -170,7 +204,7 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
     """
     # Reasons for a shutdown to be cancelled.
     WINDOW_CLOSED = "shutdown window closed"
-    NODE_BROKEN = "cloud failed to shut down broken node"
+    DESTROY_FAILED = "destroy_node failed"
 
     def __init__(self, timer_actor, cloud_client, arvados_client, node_monitor,
                  cancellable=True, retry_wait=1, max_retry_wait=180):
@@ -203,22 +237,11 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
         self.success = success_flag
         return super(ComputeNodeShutdownActor, self)._finished()
 
-    def cancel_shutdown(self, reason):
+    def cancel_shutdown(self, reason, **kwargs):
         self.cancel_reason = reason
         self._logger.info("Shutdown cancelled: %s.", reason)
         self._finished(success_flag=False)
 
-    def _stop_if_window_closed(orig_func):
-        @functools.wraps(orig_func)
-        def stop_wrapper(self, *args, **kwargs):
-            if (self.cancellable and
-                  (self._monitor.shutdown_eligible().get() is not True)):
-                self._later.cancel_shutdown(self.WINDOW_CLOSED)
-                return None
-            else:
-                return orig_func(self, *args, **kwargs)
-        return stop_wrapper
-
     def _cancel_on_exception(orig_func):
         @functools.wraps(orig_func)
         def finish_wrapper(self, *args, **kwargs):
             try:
@@ -226,27 +249,32 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
                 return orig_func(self, *args, **kwargs)
             except Exception as error:
                 self._logger.error("Actor error %s", error)
-                self._later.cancel_shutdown("Unhandled exception %s" % error)
+                self._logger.debug("", exc_info=True)
+                self._later.cancel_shutdown("Unhandled exception %s" % error, try_resume=False)
         return finish_wrapper
 
     @_cancel_on_exception
-    @_stop_if_window_closed
-    @RetryMixin._retry()
     def shutdown_node(self):
-        self._logger.info("Starting shutdown")
-        if not self._cloud.destroy_node(self.cloud_node):
-            if self._cloud.broken(self.cloud_node):
-                self._later.cancel_shutdown(self.NODE_BROKEN)
+        if self.cancellable:
+            self._logger.info("Checking that node is still eligible for shutdown")
+            eligible, reason = self._monitor.shutdown_eligible().get()
+            if not eligible:
+                self.cancel_shutdown("No longer eligible for shut down because %s" % reason,
+                                     try_resume=True)
                 return
-            else:
-                # Force a retry.
-                raise cloud_types.LibcloudError("destroy_node failed")
-        self._logger.info("Shutdown success")
+        self._destroy_node()
+
+    def _destroy_node(self):
+        self._logger.info("Starting shutdown")
         arv_node = self._arvados_node()
-        if arv_node is None:
-            self._finished(success_flag=True)
+        if self._cloud.destroy_node(self.cloud_node):
+            self._logger.info("Shutdown success")
+            if arv_node:
+                self._later.clean_arvados_node(arv_node)
+            else:
+                self._finished(success_flag=True)
         else:
-            self._later.clean_arvados_node(arv_node)
+            self.cancel_shutdown(self.DESTROY_FAILED, try_resume=False)
 
     @ComputeNodeStateChangeBase._finish_on_exception
     @RetryMixin._retry(config.ARVADOS_ERRORS)
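With _stop_if_window_closed gone, _cancel_on_exception is the only remaining guard on shutdown_node(); the eligibility check now happens inline. For readers unfamiliar with the decorator pattern it uses, here is a self-contained sketch of the same idea with a hypothetical stand-in class (none of these names appear in the patch): an unhandled exception in the wrapped method becomes an explicit cancellation instead of killing the actor thread.

    import functools

    def cancel_on_exception(orig_func):
        @functools.wraps(orig_func)
        def finish_wrapper(self, *args, **kwargs):
            try:
                return orig_func(self, *args, **kwargs)
            except Exception as error:
                # Convert the failure into an explicit cancellation.
                self.cancel("Unhandled exception %s" % error)
        return finish_wrapper

    class Shutdown(object):
        def cancel(self, reason):
            print("Shutdown cancelled: %s" % reason)

        @cancel_on_exception
        def run(self):
            raise RuntimeError("cloud API unreachable")

    Shutdown().run()  # prints: Shutdown cancelled: Unhandled exception cloud API unreachable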
""" - def __init__(self, cloud_factory, max_retry_wait=180): + def __init__(self, cloud_factory, timer_actor, max_retry_wait=180): super(ComputeNodeUpdateActor, self).__init__() + RetryMixin.__init__(self, 1, max_retry_wait, + None, cloud_factory(), timer_actor) self._cloud = cloud_factory() - self.max_retry_wait = max_retry_wait - self.error_streak = 0 - self.next_request_time = time.time() + self._later = self.actor_ref.tell_proxy() def _set_logger(self): self._logger = logging.getLogger("%s.%s" % (self.__class__.__name__, self.actor_urn[33:])) @@ -280,28 +307,7 @@ class ComputeNodeUpdateActor(config.actor_class): def on_start(self): self._set_logger() - def _throttle_errors(orig_func): - @functools.wraps(orig_func) - def throttle_wrapper(self, *args, **kwargs): - throttle_time = self.next_request_time - time.time() - if throttle_time > 0: - time.sleep(throttle_time) - self.next_request_time = time.time() - try: - result = orig_func(self, *args, **kwargs) - except Exception as error: - if self._cloud.is_cloud_exception(error): - self.error_streak += 1 - self.next_request_time += min(2 ** self.error_streak, - self.max_retry_wait) - self._logger.warn( - "Unhandled exception: %s", error, exc_info=error) - else: - self.error_streak = 0 - return result - return throttle_wrapper - - @_throttle_errors + @RetryMixin._retry() def sync_node(self, cloud_node, arvados_node): return self._cloud.sync_node(cloud_node, arvados_node) @@ -320,7 +326,6 @@ class ComputeNodeMonitorActor(config.actor_class): ): super(ComputeNodeMonitorActor, self).__init__() self._later = self.actor_ref.tell_proxy() - self._last_log = None self._shutdowns = shutdown_timer self._cloud_node_fqdn = cloud_fqdn_func self._timer = timer_actor @@ -348,72 +353,107 @@ class ComputeNodeMonitorActor(config.actor_class): self.subscribers.add(subscriber) def _debug(self, msg, *args): - if msg == self._last_log: - return - self._last_log = msg self._logger.debug(msg, *args) - def in_state(self, *states): - # Return a boolean to say whether or not our Arvados node record is in - # one of the given states. If state information is not - # available--because this node has no Arvados record, the record is - # stale, or the record has no state information--return None. - if (self.arvados_node is None) or not timestamp_fresh( - arvados_node_mtime(self.arvados_node), self.node_stale_after): - return None + def get_state(self): + """Get node state, one of ['unpaired', 'busy', 'idle', 'down'].""" + + # If this node is not associated with an Arvados node, return 'unpaired'. + if self.arvados_node is None: + return 'unpaired' + state = self.arvados_node['crunch_worker_state'] - if not state: - return None - result = state in states - if state == 'idle': - result = result and not self.arvados_node['job_uuid'] - return result + + # If state information is not available because it is missing or the + # record is stale, return 'down'. + if not state or not timestamp_fresh(arvados_node_mtime(self.arvados_node), + self.node_stale_after): + state = 'down' + + # There's a window between when a node pings for the first time and the + # value of 'slurm_state' is synchronized by crunch-dispatch. In this + # window, the node will still report as 'down'. Check that + # first_ping_at is truthy and consider the node 'idle' during the + # initial boot grace period. 
+ if (state == 'down' and + self.arvados_node['first_ping_at'] and + timestamp_fresh(self.cloud_node_start_time, + self.boot_fail_after) and + not self._cloud.broken(self.cloud_node)): + state = 'idle' + + # "missing" means last_ping_at is stale, this should be + # considered "down" + if arvados_node_missing(self.arvados_node, self.node_stale_after): + state = 'down' + + # Turns out using 'job_uuid' this way is a bad idea. The node record + # is assigned the job_uuid before the job is locked (which removes it + # from the queue) which means the job will be double-counted as both in + # the wishlist and but also keeping a node busy. This end result is + # excess nodes being booted. + #if state == 'idle' and self.arvados_node['job_uuid']: + # state = 'busy' + + return state + + def in_state(self, *states): + return self.get_state() in states def shutdown_eligible(self): - """Return True if eligible for shutdown, or a string explaining why the node - is not eligible for shutdown.""" + """Determine if node is candidate for shut down. - if not self._shutdowns.window_open(): - return "shutdown window is not open." - if self.arvados_node is None: - # Node is unpaired. - # If it hasn't pinged Arvados after boot_fail seconds, shut it down - if timestamp_fresh(self.cloud_node_start_time, self.boot_fail_after): - return "node is still booting, will be considered a failed boot at %s" % time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.cloud_node_start_time + self.boot_fail_after)) - else: - return True - missing = arvados_node_missing(self.arvados_node, self.node_stale_after) - if missing and self._cloud.broken(self.cloud_node): - # Node is paired, but Arvados says it is missing and the cloud says the node - # is in an error state, so shut it down. - return True - if missing is None and self._cloud.broken(self.cloud_node): - self._logger.info( - "Cloud node considered 'broken' but paired node %s last_ping_at is None, " + - "cannot check node_stale_after (node may be shut down and we just haven't gotten the message yet).", - self.arvados_node['uuid']) - if self.in_state('idle'): - return True + Returns a tuple of (boolean, string) where the first value is whether + the node is candidate for shut down, and the second value is the + reason for the decision. + """ + + # Collect states and then consult state transition table whether we + # should shut down. Possible states are: + # crunch_worker_state = ['unpaired', 'busy', 'idle', 'down'] + # window = ["open", "closed"] + # boot_grace = ["boot wait", "boot exceeded"] + # idle_grace = ["not idle", "idle wait", "idle exceeded"] + + if self.arvados_node and not timestamp_fresh(arvados_node_mtime(self.arvados_node), self.node_stale_after): + return (False, "node state is stale") + + crunch_worker_state = self.get_state() + + window = "open" if self._shutdowns.window_open() else "closed" + + if timestamp_fresh(self.cloud_node_start_time, self.boot_fail_after): + boot_grace = "boot wait" else: - return "node is not idle." + boot_grace = "boot exceeded" + + # API server side not implemented yet. 
     def shutdown_eligible(self):
-        """Return True if eligible for shutdown, or a string explaining why the node
-        is not eligible for shutdown."""
+        """Determine whether the node is a candidate for shutdown.
 
-        if not self._shutdowns.window_open():
-            return "shutdown window is not open."
-        if self.arvados_node is None:
-            # Node is unpaired.
-            # If it hasn't pinged Arvados after boot_fail seconds, shut it down
-            if timestamp_fresh(self.cloud_node_start_time, self.boot_fail_after):
-                return "node is still booting, will be considered a failed boot at %s" % time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.cloud_node_start_time + self.boot_fail_after))
-            else:
-                return True
-        missing = arvados_node_missing(self.arvados_node, self.node_stale_after)
-        if missing and self._cloud.broken(self.cloud_node):
-            # Node is paired, but Arvados says it is missing and the cloud says the node
-            # is in an error state, so shut it down.
-            return True
-        if missing is None and self._cloud.broken(self.cloud_node):
-            self._logger.info(
-                "Cloud node considered 'broken' but paired node %s last_ping_at is None, " +
-                "cannot check node_stale_after (node may be shut down and we just haven't gotten the message yet).",
-                self.arvados_node['uuid'])
-        if self.in_state('idle'):
-            return True
+        Returns a tuple of (boolean, string) where the first value is whether
+        the node is a candidate for shutdown, and the second value is the
+        reason for the decision.
+        """
+
+        # Collect the individual state dimensions, then consult the state
+        # transition table to decide whether to shut down.  Possible states:
+        # crunch_worker_state = ['unpaired', 'busy', 'idle', 'down']
+        # window = ["open", "closed"]
+        # boot_grace = ["boot wait", "boot exceeded"]
+        # idle_grace = ["not idle", "idle wait", "idle exceeded"]
+
+        if self.arvados_node and not timestamp_fresh(arvados_node_mtime(self.arvados_node), self.node_stale_after):
+            return (False, "node state is stale")
+
+        crunch_worker_state = self.get_state()
+
+        window = "open" if self._shutdowns.window_open() else "closed"
+
+        if timestamp_fresh(self.cloud_node_start_time, self.boot_fail_after):
+            boot_grace = "boot wait"
         else:
-            return "node is not idle."
+            boot_grace = "boot exceeded"
+
+        # Idle grace tracking is not implemented on the API server side yet.
+        idle_grace = 'idle exceeded'
 
-    def resume_node(self):
-        pass
+        node_state = (crunch_worker_state, window, boot_grace, idle_grace)
+        t = transitions[node_state]
+        if t is not None:
+            # Yes: this combination of states is eligible for shutdown.
+            return (True, "node state is %s" % (node_state,))
+        else:
+            # No: report the state combination as the reason.
+            return (False, "node state is %s" % (node_state,))
 
     def consider_shutdown(self):
         try:
+            eligible, reason = self.shutdown_eligible()
             next_opening = self._shutdowns.next_opening()
-            eligible = self.shutdown_eligible()
-            if eligible is True:
-                self._debug("Suggesting shutdown.")
+            if eligible:
+                self._debug("Suggesting shutdown because %s", reason)
                 _notify_subscribers(self.actor_ref.proxy(), self.subscribers)
-            elif self._shutdowns.window_open():
-                self._debug("Cannot shut down because %s", eligible)
-            elif self.last_shutdown_opening != next_opening:
-                self._debug("Shutdown window closed.  Next at %s.",
-                            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(next_opening)))
-                self._timer.schedule(next_opening, self._later.consider_shutdown)
-                self.last_shutdown_opening = next_opening
+            else:
+                self._debug("Not eligible for shut down because %s", reason)
+
+            if self.last_shutdown_opening != next_opening:
+                self._debug("Shutdown window closed.  Next at %s.",
+                            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(next_opening)))
+                self._timer.schedule(next_opening, self._later.consider_shutdown)
+                self.last_shutdown_opening = next_opening
         except Exception:
             self._logger.exception("Unexpected exception")
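With this change, the shutdown decision reduces to a lookup in the transitions table imported at the top of the file. The table itself is defined in arvnodeman/computenode/dispatch/transitions.py and is not shown in this diff; as a hypothetical illustration of its shape, keys are the four-part state tuples and a non-None value marks an eligible combination:

    # Illustrative sketch only; the real table enumerates every reachable
    # combination of (crunch_worker_state, window, boot_grace, idle_grace).
    transitions = {
        ('idle', 'open', 'boot exceeded', 'idle exceeded'): True,
        ('idle', 'closed', 'boot exceeded', 'idle exceeded'): None,
        ('busy', 'open', 'boot exceeded', 'idle exceeded'): None,
        # ...one entry for each reachable combination
    }

    node_state = ('idle', 'open', 'boot exceeded', 'idle exceeded')
    eligible = transitions[node_state] is not None  # True: suggest shutdown

Encoding the policy as a table makes every state combination explicit and testable, rather than scattering the decision across nested conditionals as in the removed version of shutdown_eligible().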