X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/a7a16338702965de3ad0687470ef5beb2f42759b..10402f74ccdae453ae353c389a02c0415adcf1e7:/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py

diff --git a/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py b/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
index f9dbd20e6b..c5dd1adef1 100644
--- a/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
+++ b/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
@@ -1,12 +1,18 @@
 #!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
 
 from __future__ import absolute_import, print_function
 
 import functools
 import logging
 import time
+import re
 
 import libcloud.common.types as cloud_types
+from libcloud.common.exceptions import BaseHTTPError
+
 import pykka
 
 from .. import \
@@ -16,6 +22,8 @@ from ...clientactor import _notify_subscribers
 from ... import config
 from .transitions import transitions
 
+QuotaExceeded = "QuotaExceeded"
+
 class ComputeNodeStateChangeBase(config.actor_class, RetryMixin):
     """Base class for actors that change a compute node's state.
 
@@ -96,6 +104,7 @@ class ComputeNodeSetupActor(ComputeNodeStateChangeBase):
         self.cloud_size = cloud_size
         self.arvados_node = None
         self.cloud_node = None
+        self.error = None
         if arvados_node is None:
             self._later.create_arvados_node()
         else:
@@ -119,10 +128,36 @@ class ComputeNodeSetupActor(ComputeNodeStateChangeBase):
     def create_cloud_node(self):
         self._logger.info("Sending create_node request for node size %s.",
                           self.cloud_size.name)
-        self.cloud_node = self._cloud.create_node(self.cloud_size,
-                                                  self.arvados_node)
-        if not self.cloud_node.size:
-            self.cloud_node.size = self.cloud_size
+        try:
+            self.cloud_node = self._cloud.create_node(self.cloud_size,
+                                                      self.arvados_node)
+        except BaseHTTPError as e:
+            if e.code == 429 or "RequestLimitExceeded" in e.message:
+                # Don't consider API rate limits to be quota errors.
+                # Re-raise so the Retry logic applies.
+                raise
+
+            # The set of possible error codes / messages isn't documented for
+            # all clouds, so use a keyword heuristic to determine if the
+            # failure is likely due to a quota.
+            if re.search(r'(exceed|quota|limit)', e.message, re.I):
+                self.error = QuotaExceeded
+                self._logger.warning("Quota exceeded: %s", e)
+                self._finished()
+                return
+            else:
+                # Something else happened, re-raise so the Retry logic applies.
+                raise
+        except Exception as e:
+            raise
+
+        # The information included in the node size object we get from libcloud
+        # is inconsistent between cloud drivers.  Replace the libcloud NodeSize
+        # object with a compatible CloudSizeWrapper object which merges the size
+        # info reported from the cloud with size information from the
+        # configuration file.
+        self.cloud_node.size = self.cloud_size
+
         self._logger.info("Cloud node %s created.", self.cloud_node.id)
         self._later.update_arvados_node_properties()
 
@@ -171,7 +206,7 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
     """
     # Reasons for a shutdown to be cancelled.
     WINDOW_CLOSED = "shutdown window closed"
-    NODE_BROKEN = "cloud failed to shut down broken node"
+    DESTROY_FAILED = "destroy_node failed"
 
     def __init__(self, timer_actor, cloud_client, arvados_client, node_monitor,
                  cancellable=True, retry_wait=1, max_retry_wait=180):
@@ -204,7 +239,10 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
         self.success = success_flag
         return super(ComputeNodeShutdownActor, self)._finished()
 
-    def cancel_shutdown(self, reason):
+    def cancel_shutdown(self, reason, **kwargs):
+        if self.cancel_reason is not None:
+            # already cancelled
+            return
         self.cancel_reason = reason
         self._logger.info("Shutdown cancelled: %s.", reason)
         self._finished(success_flag=False)
@@ -216,26 +254,35 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
                 return orig_func(self, *args, **kwargs)
             except Exception as error:
                 self._logger.error("Actor error %s", error)
-                self._later.cancel_shutdown("Unhandled exception %s" % error)
+                self._logger.debug("", exc_info=True)
+                self._later.cancel_shutdown("Unhandled exception %s" % error, try_resume=False)
         return finish_wrapper
 
     @_cancel_on_exception
-    @RetryMixin._retry()
     def shutdown_node(self):
-        self._logger.info("Starting shutdown")
-        if not self._cloud.destroy_node(self.cloud_node):
-            if self._cloud.broken(self.cloud_node):
-                self._later.cancel_shutdown(self.NODE_BROKEN)
+        if self.cancel_reason is not None:
+            # already cancelled
+            return
+        if self.cancellable:
+            self._logger.info("Checking that node is still eligible for shutdown")
+            eligible, reason = self._monitor.shutdown_eligible().get()
+            if not eligible:
+                self.cancel_shutdown("No longer eligible for shut down because %s" % reason,
+                                     try_resume=True)
                 return
-            else:
-                # Force a retry.
-                raise cloud_types.LibcloudError("destroy_node failed")
-        self._logger.info("Shutdown success")
+        self._destroy_node()
+
+    def _destroy_node(self):
+        self._logger.info("Starting shutdown")
         arv_node = self._arvados_node()
-        if arv_node is None:
-            self._finished(success_flag=True)
+        if self._cloud.destroy_node(self.cloud_node):
+            self._logger.info("Shutdown success")
+            if arv_node:
+                self._later.clean_arvados_node(arv_node)
+            else:
+                self._finished(success_flag=True)
         else:
-            self._later.clean_arvados_node(arv_node)
+            self.cancel_shutdown(self.DESTROY_FAILED, try_resume=False)
 
     @ComputeNodeStateChangeBase._finish_on_exception
     @RetryMixin._retry(config.ARVADOS_ERRORS)
@@ -244,7 +291,7 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
         self._finished(success_flag=True)
 
 
-class ComputeNodeUpdateActor(config.actor_class):
+class ComputeNodeUpdateActor(config.actor_class, RetryMixin):
     """Actor to dispatch one-off cloud management requests.
 
     This actor receives requests for small cloud updates, and
@@ -253,12 +300,12 @@ class ComputeNodeUpdateActor(config.actor_class):
     dedicated actor for this gives us the opportunity to control the
     flow of requests; e.g., by backing off when errors occur.
     """
-    def __init__(self, cloud_factory, max_retry_wait=180):
+    def __init__(self, cloud_factory, timer_actor, max_retry_wait=180):
         super(ComputeNodeUpdateActor, self).__init__()
+        RetryMixin.__init__(self, 1, max_retry_wait,
+                            None, cloud_factory(), timer_actor)
         self._cloud = cloud_factory()
-        self.max_retry_wait = max_retry_wait
-        self.error_streak = 0
-        self.next_request_time = time.time()
+        self._later = self.actor_ref.tell_proxy()
 
     def _set_logger(self):
         self._logger = logging.getLogger("%s.%s" % (self.__class__.__name__, self.actor_urn[33:]))
@@ -266,28 +313,7 @@ class ComputeNodeUpdateActor(config.actor_class):
     def on_start(self):
         self._set_logger()
 
-    def _throttle_errors(orig_func):
-        @functools.wraps(orig_func)
-        def throttle_wrapper(self, *args, **kwargs):
-            throttle_time = self.next_request_time - time.time()
-            if throttle_time > 0:
-                time.sleep(throttle_time)
-            self.next_request_time = time.time()
-            try:
-                result = orig_func(self, *args, **kwargs)
-            except Exception as error:
-                if self._cloud.is_cloud_exception(error):
-                    self.error_streak += 1
-                    self.next_request_time += min(2 ** self.error_streak,
-                                                  self.max_retry_wait)
-                self._logger.warn(
-                    "Unhandled exception: %s", error, exc_info=error)
-            else:
-                self.error_streak = 0
-                return result
-        return throttle_wrapper
-
-    @_throttle_errors
+    @RetryMixin._retry()
     def sync_node(self, cloud_node, arvados_node):
         return self._cloud.sync_node(cloud_node, arvados_node)
 
@@ -306,7 +332,6 @@ class ComputeNodeMonitorActor(config.actor_class):
                  ):
         super(ComputeNodeMonitorActor, self).__init__()
        self._later = self.actor_ref.tell_proxy()
-        self._last_log = None
         self._shutdowns = shutdown_timer
         self._cloud_node_fqdn = cloud_fqdn_func
         self._timer = timer_actor
@@ -334,9 +359,6 @@ class ComputeNodeMonitorActor(config.actor_class):
         self.subscribers.add(subscriber)
 
     def _debug(self, msg, *args):
-        if msg == self._last_log:
-            return
-        self._last_log = msg
         self._logger.debug(msg, *args)
 
     def get_state(self):
@@ -371,8 +393,13 @@ class ComputeNodeMonitorActor(config.actor_class):
         if arvados_node_missing(self.arvados_node, self.node_stale_after):
             state = 'down'
 
-        if state == 'idle' and self.arvados_node['job_uuid']:
-            state = 'busy'
+        # Turns out using 'job_uuid' this way is a bad idea.  The node record
+        # is assigned the job_uuid before the job is locked (which removes it
+        # from the queue), which means the job will be double-counted as both
+        # in the wishlist and keeping a node busy.  The end result is excess
+        # nodes being booted.
+        #if state == 'idle' and self.arvados_node['job_uuid']:
+        #    state = 'busy'
 
         return state
 