X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/2aef6ca08d80c0fd25d74ddb9ab52cf535a33d3e..10402f74ccdae453ae353c389a02c0415adcf1e7:/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py

diff --git a/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py b/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
index 71f9083c01..c5dd1adef1 100644
--- a/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
+++ b/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
@@ -1,12 +1,18 @@
 #!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
 
 from __future__ import absolute_import, print_function
 
 import functools
 import logging
 import time
+import re
 
 import libcloud.common.types as cloud_types
+from libcloud.common.exceptions import BaseHTTPError
+
 import pykka
 
 from .. import \
@@ -16,6 +22,8 @@ from ...clientactor import _notify_subscribers
 from ... import config
 from .transitions import transitions
 
+QuotaExceeded = "QuotaExceeded"
+
 class ComputeNodeStateChangeBase(config.actor_class, RetryMixin):
     """Base class for actors that change a compute node's state.
 
@@ -96,6 +104,7 @@ class ComputeNodeSetupActor(ComputeNodeStateChangeBase):
         self.cloud_size = cloud_size
         self.arvados_node = None
         self.cloud_node = None
+        self.error = None
         if arvados_node is None:
             self._later.create_arvados_node()
         else:
@@ -119,10 +128,36 @@ class ComputeNodeSetupActor(ComputeNodeStateChangeBase):
     def create_cloud_node(self):
         self._logger.info("Sending create_node request for node size %s.",
                           self.cloud_size.name)
-        self.cloud_node = self._cloud.create_node(self.cloud_size,
-                                                  self.arvados_node)
-        if not self.cloud_node.size:
-            self.cloud_node.size = self.cloud_size
+        try:
+            self.cloud_node = self._cloud.create_node(self.cloud_size,
+                                                      self.arvados_node)
+        except BaseHTTPError as e:
+            if e.code == 429 or "RequestLimitExceeded" in e.message:
+                # Don't consider API rate limits to be quota errors.
+                # re-raise so the Retry logic applies.
+                raise
+
+            # The set of possible error codes / messages isn't documented for
+            # all clouds, so use a keyword heuristic to determine if the
+            # failure is likely due to a quota.
+            if re.search(r'(exceed|quota|limit)', e.message, re.I):
+                self.error = QuotaExceeded
+                self._logger.warning("Quota exceeded: %s", e)
+                self._finished()
+                return
+            else:
+                # Something else happened, re-raise so the Retry logic applies.
+                raise
+        except Exception as e:
+            raise
+
+        # The information included in the node size object we get from libcloud
+        # is inconsistent between cloud drivers.  Replace libcloud NodeSize
+        # object with compatible CloudSizeWrapper object which merges the size
+        # info reported from the cloud with size information from the
+        # configuration file.
+        self.cloud_node.size = self.cloud_size
+
         self._logger.info("Cloud node %s created.", self.cloud_node.id)
         self._later.update_arvados_node_properties()
 
@@ -205,6 +240,9 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
         return super(ComputeNodeShutdownActor, self)._finished()
 
     def cancel_shutdown(self, reason, **kwargs):
+        if self.cancel_reason is not None:
+            # already cancelled
+            return
         self.cancel_reason = reason
         self._logger.info("Shutdown cancelled: %s.", reason)
         self._finished(success_flag=False)
@@ -222,6 +260,9 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
 
     @_cancel_on_exception
     def shutdown_node(self):
+        if self.cancel_reason is not None:
+            # already cancelled
+            return
         if self.cancellable:
             self._logger.info("Checking that node is still eligible for shutdown")
             eligible, reason = self._monitor.shutdown_eligible().get()
@@ -250,7 +291,7 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
         self._finished(success_flag=True)
 
 
-class ComputeNodeUpdateActor(config.actor_class):
+class ComputeNodeUpdateActor(config.actor_class, RetryMixin):
     """Actor to dispatch one-off cloud management requests.
 
     This actor receives requests for small cloud updates, and
@@ -259,12 +300,12 @@ class ComputeNodeUpdateActor(config.actor_class):
     dedicated actor for this gives us the opportunity to control the
     flow of requests; e.g., by backing off when errors occur.
     """
-    def __init__(self, cloud_factory, max_retry_wait=180):
+    def __init__(self, cloud_factory, timer_actor, max_retry_wait=180):
         super(ComputeNodeUpdateActor, self).__init__()
+        RetryMixin.__init__(self, 1, max_retry_wait,
+                            None, cloud_factory(), timer_actor)
         self._cloud = cloud_factory()
-        self.max_retry_wait = max_retry_wait
-        self.error_streak = 0
-        self.next_request_time = time.time()
+        self._later = self.actor_ref.tell_proxy()
 
     def _set_logger(self):
         self._logger = logging.getLogger("%s.%s" % (self.__class__.__name__, self.actor_urn[33:]))
@@ -272,28 +313,7 @@ class ComputeNodeUpdateActor(config.actor_class):
     def on_start(self):
         self._set_logger()
 
-    def _throttle_errors(orig_func):
-        @functools.wraps(orig_func)
-        def throttle_wrapper(self, *args, **kwargs):
-            throttle_time = self.next_request_time - time.time()
-            if throttle_time > 0:
-                time.sleep(throttle_time)
-            self.next_request_time = time.time()
-            try:
-                result = orig_func(self, *args, **kwargs)
-            except Exception as error:
-                if self._cloud.is_cloud_exception(error):
-                    self.error_streak += 1
-                    self.next_request_time += min(2 ** self.error_streak,
-                                                  self.max_retry_wait)
-                self._logger.warn(
-                    "Unhandled exception: %s", error, exc_info=error)
-            else:
-                self.error_streak = 0
-            return result
-        return throttle_wrapper
-
-    @_throttle_errors
+    @RetryMixin._retry()
     def sync_node(self, cloud_node, arvados_node):
         return self._cloud.sync_node(cloud_node, arvados_node)
 
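
For reference, a minimal standalone sketch of the error-classification heuristic this patch adds to create_cloud_node. The classify_create_error helper and the sample messages below are illustrative, not part of the patch: HTTP 429 / "RequestLimitExceeded" responses are treated as rate limits and left to the retry logic, while messages matching the quota keywords short-circuit node setup with QuotaExceeded.

import re

QuotaExceeded = "QuotaExceeded"

def classify_create_error(code, message):
    # Rate limits are not quota errors; leave them to the retry logic.
    if code == 429 or "RequestLimitExceeded" in message:
        return "retry"
    # Keyword heuristic from the patch: quota-like failures stop node setup.
    if re.search(r'(exceed|quota|limit)', message, re.I):
        return QuotaExceeded
    # Anything else is also left to the retry logic.
    return "retry"

if __name__ == "__main__":
    print(classify_create_error(403, "Operation would exceed quota 'CPUS'"))  # QuotaExceeded
    print(classify_create_error(429, "RequestLimitExceeded"))                 # retry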
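
The ComputeNodeUpdateActor change drops the hand-rolled _throttle_errors decorator in favor of RetryMixin._retry(), so the setup, shutdown, and update actors share one retry/backoff policy. As a rough sketch of the pattern the removed decorator implemented (exponential backoff capped at max_retry_wait seconds; the throttle_errors name and the re-raise behavior here are illustrative, not the RetryMixin API, and the removed version logged the error instead of re-raising):

import functools
import time

def throttle_errors(max_retry_wait=180):
    # Each consecutive error doubles the delay before the next request,
    # capped at max_retry_wait seconds, mirroring the removed decorator.
    def decorator(orig_func):
        state = {"error_streak": 0, "next_request_time": time.time()}
        @functools.wraps(orig_func)
        def wrapper(*args, **kwargs):
            wait = state["next_request_time"] - time.time()
            if wait > 0:
                time.sleep(wait)
            state["next_request_time"] = time.time()
            try:
                result = orig_func(*args, **kwargs)
            except Exception:
                state["error_streak"] += 1
                state["next_request_time"] += min(2 ** state["error_streak"],
                                                  max_retry_wait)
                raise
            state["error_streak"] = 0
            return result
        return wrapper
    return decorator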