#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
from __future__ import absolute_import, print_function
import functools
import logging
import time
+import re
import libcloud.common.types as cloud_types
+from libcloud.common.exceptions import BaseHTTPError
+
import pykka
from .. import \
arvados_node_fqdn, arvados_node_mtime, timestamp_fresh, \
arvados_node_missing, RetryMixin
from ...clientactor import _notify_subscribers
from ... import config
+from ... import status
from .transitions import transitions
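+
+# Sentinel stored in ComputeNodeSetupActor.error when node creation
+# fails with what looks like a cloud provider quota error.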
+QuotaExceeded = "QuotaExceeded"
+
class ComputeNodeStateChangeBase(config.actor_class, RetryMixin):
"""Base class for actors that change a compute node's state.
Manager to handle).
"""
def __init__(self, timer_actor, arvados_client, cloud_client,
- cloud_size, arvados_node,
+ cloud_size, arvados_node=None,
retry_wait=1, max_retry_wait=180):
super(ComputeNodeSetupActor, self).__init__(
cloud_client, arvados_client, timer_actor,
retry_wait, max_retry_wait)
self.cloud_size = cloud_size
self.arvados_node = None
self.cloud_node = None
- self._later.prepare_arvados_node(arvados_node)
+ self.error = None
+ if arvados_node is None:
+ self._later.create_arvados_node()
+ else:
+ self._later.prepare_arvados_node(arvados_node)
+
+ @ComputeNodeStateChangeBase._finish_on_exception
+ @RetryMixin._retry(config.ARVADOS_ERRORS)
+ def create_arvados_node(self):
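+ # assign_slot=True asks the API server to atomically assign a slot
+ # number (from which the compute node's hostname is typically
+ # derived) to the new node record.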
+ self.arvados_node = self._arvados.nodes().create(
+ body={}, assign_slot=True).execute()
+ self._later.create_cloud_node()
@ComputeNodeStateChangeBase._finish_on_exception
@RetryMixin._retry(config.ARVADOS_ERRORS)
def prepare_arvados_node(self, node):
- self.arvados_node = self._clean_arvados_node(
- node, "Prepared by Node Manager")
+ self._clean_arvados_node(node, "Prepared by Node Manager")
+ self.arvados_node = self._arvados.nodes().update(
+ uuid=node['uuid'], body={}, assign_slot=True).execute()
self._later.create_cloud_node()
@ComputeNodeStateChangeBase._finish_on_exception
def create_cloud_node(self):
self._logger.info("Sending create_node request for node size %s.",
self.cloud_size.name)
- self.cloud_node = self._cloud.create_node(self.cloud_size,
- self.arvados_node)
- if not self.cloud_node.size:
- self.cloud_node.size = self.cloud_size
+ try:
+ self.cloud_node = self._cloud.create_node(self.cloud_size,
+ self.arvados_node)
+ except BaseHTTPError as e:
+ if e.code == 429 or "RequestLimitExceeded" in e.message:
+ # Don't consider API rate limits to be quota errors.
+ # re-raise so the Retry logic applies.
+ raise
+
+ # The set of possible error codes / messages isn't documented for
+ # all clouds, so use a keyword heuristic to determine if the
+ # failure is likely due to a quota.
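+ # For example, an EC2 "InstanceLimitExceeded" error or an Azure
+ # message saying the operation would exceed the approved quota
+ # should both match.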
+ if re.search(r'(exceed|quota|limit)', e.message, re.I):
+ self.error = QuotaExceeded
+ self._logger.warning("Quota exceeded: %s", e)
+ self._finished()
+ return
+ else:
+ # Something else happened, re-raise so the Retry logic applies.
+ raise
+
+ # The information included in the node size object we get from libcloud
+ # is inconsistent between cloud drivers. Replace libcloud NodeSize
+ # object with compatible CloudSizeWrapper object which merges the size
+ # info reported from the cloud with size information from the
+ # configuration file.
+ self.cloud_node.size = self.cloud_size
+
self._logger.info("Cloud node %s created.", self.cloud_node.id)
self._later.update_arvados_node_properties()
"""
# Reasons for a shutdown to be cancelled.
WINDOW_CLOSED = "shutdown window closed"
- NODE_BROKEN = "cloud failed to shut down broken node"
+ DESTROY_FAILED = "destroy_node failed"
def __init__(self, timer_actor, cloud_client, arvados_client, node_monitor,
cancellable=True, retry_wait=1, max_retry_wait=180):
def _finished(self, success_flag=None):
if success_flag is not None:
self.success = success_flag
return super(ComputeNodeShutdownActor, self)._finished()
- def cancel_shutdown(self, reason):
+ def cancel_shutdown(self, reason, **kwargs):
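+ # **kwargs absorbs extra options such as try_resume, which
+ # dispatch-specific subclasses (e.g. the SLURM one) use to decide
+ # whether to put the node back in service.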
+ if self.cancel_reason is not None:
+ # already cancelled
+ return
self.cancel_reason = reason
self._logger.info("Shutdown cancelled: %s.", reason)
self._finished(success_flag=False)
return orig_func(self, *args, **kwargs)
except Exception as error:
self._logger.error("Actor error %s", error)
- self._later.cancel_shutdown("Unhandled exception %s" % error)
+ self._logger.debug("", exc_info=True)
+ self._later.cancel_shutdown("Unhandled exception %s" % error, try_resume=False)
return finish_wrapper
@_cancel_on_exception
- @RetryMixin._retry()
def shutdown_node(self):
- self._logger.info("Starting shutdown")
- if not self._cloud.destroy_node(self.cloud_node):
- if self._cloud.broken(self.cloud_node):
- self._later.cancel_shutdown(self.NODE_BROKEN)
+ if self.cancel_reason is not None:
+ # already cancelled
+ return
+ if self.cancellable:
+ self._logger.info("Checking that node is still eligible for shutdown")
+ eligible, reason = self._monitor.shutdown_eligible().get()
+ if not eligible:
+ self.cancel_shutdown("No longer eligible for shut down because %s" % reason,
+ try_resume=True)
return
- else:
- # Force a retry.
- raise cloud_types.LibcloudError("destroy_node failed")
- self._logger.info("Shutdown success")
+ # If boot failed, count the event
+ if self._monitor.get_state().get() == 'unpaired':
+ status.tracker.counter_add('boot_failures')
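+ # (status.tracker aggregates the counters reported by the
+ # management status endpoint.)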
+ self._destroy_node()
+
+ def _destroy_node(self):
+ self._logger.info("Starting shutdown")
arv_node = self._arvados_node()
- if arv_node is None:
- self._finished(success_flag=True)
+ if self._cloud.destroy_node(self.cloud_node):
+ self._logger.info("Shutdown success")
+ if arv_node:
+ self._later.clean_arvados_node(arv_node)
+ else:
+ self._finished(success_flag=True)
else:
- self._later.clean_arvados_node(arv_node)
+ self.cancel_shutdown(self.DESTROY_FAILED, try_resume=False)
@ComputeNodeStateChangeBase._finish_on_exception
@RetryMixin._retry(config.ARVADOS_ERRORS)
def clean_arvados_node(self, arvados_node):
self._clean_arvados_node(arvados_node, "Shut down by Node Manager")
self._finished(success_flag=True)
-class ComputeNodeUpdateActor(config.actor_class):
+class ComputeNodeUpdateActor(config.actor_class, RetryMixin):
"""Actor to dispatch one-off cloud management requests.
This actor receives requests for small cloud updates, and
dedicated actor for this gives us the opportunity to control the
flow of requests; e.g., by backing off when errors occur.
"""
- def __init__(self, cloud_factory, max_retry_wait=180):
+ def __init__(self, cloud_factory, timer_actor, max_retry_wait=180):
super(ComputeNodeUpdateActor, self).__init__()
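+ # RetryMixin supersedes the hand-rolled _throttle_errors decorator
+ # removed below, providing equivalent exponential backoff through
+ # the timer actor.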
+ RetryMixin.__init__(self, 1, max_retry_wait,
+ None, cloud_factory(), timer_actor)
self._cloud = cloud_factory()
- self.max_retry_wait = max_retry_wait
- self.error_streak = 0
- self.next_request_time = time.time()
+ self._later = self.actor_ref.tell_proxy()
def _set_logger(self):
self._logger = logging.getLogger("%s.%s" % (self.__class__.__name__, self.actor_urn[33:]))
def on_start(self):
self._set_logger()
- def _throttle_errors(orig_func):
- @functools.wraps(orig_func)
- def throttle_wrapper(self, *args, **kwargs):
- throttle_time = self.next_request_time - time.time()
- if throttle_time > 0:
- time.sleep(throttle_time)
- self.next_request_time = time.time()
- try:
- result = orig_func(self, *args, **kwargs)
- except Exception as error:
- if self._cloud.is_cloud_exception(error):
- self.error_streak += 1
- self.next_request_time += min(2 ** self.error_streak,
- self.max_retry_wait)
- self._logger.warn(
- "Unhandled exception: %s", error, exc_info=error)
- else:
- self.error_streak = 0
- return result
- return throttle_wrapper
-
- @_throttle_errors
+ @RetryMixin._retry()
def sync_node(self, cloud_node, arvados_node):
- return self._cloud.sync_node(cloud_node, arvados_node)
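+ # Skip the no-op case: only touch the Arvados record when the
+ # FQDNs disagree. (This check previously lived in
+ # ComputeNodeMonitorActor.update_arvados_node.)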
+ if self._cloud.node_fqdn(cloud_node) != arvados_node_fqdn(arvados_node):
+ return self._cloud.sync_node(cloud_node, arvados_node)
class ComputeNodeMonitorActor(config.actor_class):
"""Actor to manage a running compute node.

This actor gets updates about the node's Arvados record and cloud status.
It uses this information to notify subscribers when the node is eligible
for shutdown.
"""
def __init__(self, cloud_node, cloud_node_start_time, shutdown_timer,
- cloud_fqdn_func, timer_actor, update_actor, cloud_client,
+ timer_actor, update_actor, cloud_client,
arvados_node=None, poll_stale_after=600, node_stale_after=3600,
boot_fail_after=1800
):
super(ComputeNodeMonitorActor, self).__init__()
self._later = self.actor_ref.tell_proxy()
- self._last_log = None
self._shutdowns = shutdown_timer
- self._cloud_node_fqdn = cloud_fqdn_func
self._timer = timer_actor
self._update = update_actor
self._cloud = cloud_client
self.subscribers.add(subscriber)
def _debug(self, msg, *args):
- if msg == self._last_log:
- return
- self._last_log = msg
self._logger.debug(msg, *args)
- def in_state(self, *states):
- # Return a boolean to say whether or not our Arvados node record is in
- # one of the given states. If state information is not
- # available--because this node has no Arvados record, the record is
- # stale, or the record has no state information--return None.
- if (self.arvados_node is None) or not timestamp_fresh(
- arvados_node_mtime(self.arvados_node), self.node_stale_after):
- return None
+ def get_state(self):
+ """Get node state, one of ['unpaired', 'busy', 'idle', 'down']."""
+
+ # If this node is not associated with an Arvados node, return
+ # 'unpaired' if we're in the boot grace period, and 'down' if not,
+ # so it isn't counted towards usable nodes.
+ if self.arvados_node is None:
+ if timestamp_fresh(self.cloud_node_start_time,
+ self.boot_fail_after):
+ return 'unpaired'
+ else:
+ return 'down'
+
state = self.arvados_node['crunch_worker_state']
- if not state:
- return None
+
+ # If state information is not available because it is missing or the
+ # record is stale, return 'down'.
+ if not state or not timestamp_fresh(arvados_node_mtime(self.arvados_node),
+ self.node_stale_after):
+ state = 'down'
# There's a window between when a node pings for the first time and the
# value of 'slurm_state' is synchronized by crunch-dispatch. In this
# window the node will still be reported as 'down'. Treat it as 'idle'
# while it is within the boot grace period and the cloud does not
# report the node as broken.
if (state == 'down' and
self.arvados_node['first_ping_at'] and
timestamp_fresh(self.cloud_node_start_time,
- self.boot_fail_after)):
+ self.boot_fail_after) and
+ not self._cloud.broken(self.cloud_node)):
state = 'idle'
# "missing" means last_ping_at is stale, this should be
if arvados_node_missing(self.arvados_node, self.node_stale_after):
state = 'down'
- result = state in states
+ # Turns out using 'job_uuid' this way is a bad idea. The node record
+ # is assigned the job_uuid before the job is locked (which removes it
+ # from the queue) which means the job will be double-counted as both in
+ # the wishlist but also keeping a node busy. The end result is
+ # excess nodes being booted.
+ #if state == 'idle' and self.arvados_node['job_uuid']:
+ # state = 'busy'
+
+ # Update idle node times tracker
if state == 'idle':
- result = result and not self.arvados_node['job_uuid']
- return result
+ status.tracker.idle_in(self.arvados_node['hostname'])
+ else:
+ status.tracker.idle_out(self.arvados_node['hostname'])
+
+ return state
+
+ def in_state(self, *states):
+ return self.get_state() in states
def shutdown_eligible(self):
"""Determine if node is candidate for shut down.
# boot_grace = ["boot wait", "boot exceeded"]
# idle_grace = ["not idle", "idle wait", "idle exceeded"]
- if self.arvados_node is None:
- crunch_worker_state = 'unpaired'
- elif not timestamp_fresh(arvados_node_mtime(self.arvados_node), self.node_stale_after):
+ if self.arvados_node and not timestamp_fresh(arvados_node_mtime(self.arvados_node), self.node_stale_after):
return (False, "node state is stale")
- elif self.in_state('down'):
- crunch_worker_state = 'down'
- elif self.in_state('idle'):
- crunch_worker_state = 'idle'
- elif self.in_state('busy'):
- crunch_worker_state = 'busy'
- else:
- return (False, "node is paired but crunch_worker_state is '%s'" % self.arvados_node['crunch_worker_state'])
+
+ crunch_worker_state = self.get_state()
window = "open" if self._shutdowns.window_open() else "closed"
self._later.consider_shutdown()
def update_arvados_node(self, arvados_node):
- # If the cloud node's FQDN doesn't match what's in the Arvados node
- # record, make them match.
+ """Called when the latest Arvados node record is retrieved.
+
+ Calls the updater's sync_node() method.
+
+ """
# This method is a little unusual in the way it just fires off the
# request without checking the result or retrying errors. That's
# because this update happens every time we reload the Arvados node
# record, so a failed sync is effectively retried on the next reload,
# and the update actor has the logic to throttle those effective
# retries when there's trouble.
if arvados_node is not None:
self.arvados_node = arvados_node
- if (self._cloud_node_fqdn(self.cloud_node) !=
- arvados_node_fqdn(self.arvados_node)):
- self._update.sync_node(self.cloud_node, self.arvados_node)
+ self._update.sync_node(self.cloud_node, self.arvados_node)
self._later.consider_shutdown()