X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/6f31d0c9d4b6a0d3070e933a84cfd843722e81b1..e94f86dc97746ef21c641aadb56112481d1e66f5:/services/nodemanager/arvnodeman/daemon.py

diff --git a/services/nodemanager/arvnodeman/daemon.py b/services/nodemanager/arvnodeman/daemon.py
index 9f22568faa..243d3bfaa4 100644
--- a/services/nodemanager/arvnodeman/daemon.py
+++ b/services/nodemanager/arvnodeman/daemon.py
@@ -25,15 +25,18 @@ class _BaseNodeTracker(object):
     def __init__(self):
         self.nodes = {}
         self.orphans = {}
+        self._blacklist = set()
 
-    def __getitem__(self, key):
-        return self.nodes[key]
-
-    def __len__(self):
-        return len(self.nodes)
+    # Proxy the methods listed below to self.nodes.
+    def _proxy_method(name):
+        method = getattr(dict, name)
+        @functools.wraps(method, ('__name__', '__doc__'))
+        def wrapper(self, *args, **kwargs):
+            return method(self.nodes, *args, **kwargs)
+        return wrapper
 
-    def get(self, key, default=None):
-        return self.nodes.get(key, default)
+    for _method_name in ['__contains__', '__getitem__', '__len__', 'get']:
+        locals()[_method_name] = _proxy_method(_method_name)
 
     def record_key(self, record):
         return self.item_key(getattr(record, self.RECORD_ATTR))
@@ -41,6 +44,9 @@
     def add(self, record):
         self.nodes[self.record_key(record)] = record
 
+    def blacklist(self, key):
+        self._blacklist.add(key)
+
     def update_record(self, key, item):
         setattr(self.nodes[key], self.RECORD_ATTR, item)
 
@@ -48,7 +54,9 @@
         unseen = set(self.nodes.iterkeys())
         for item in response:
             key = self.item_key(item)
-            if key in unseen:
+            if key in self._blacklist:
+                continue
+            elif key in unseen:
                 unseen.remove(key)
                 self.update_record(key, item)
             else:
@@ -95,11 +103,15 @@ class NodeManagerDaemonActor(actor_class):
     def __init__(self, server_wishlist_actor, arvados_nodes_actor,
                  cloud_nodes_actor, cloud_update_actor, timer_actor,
                  arvados_factory, cloud_factory,
-                 shutdown_windows, min_nodes, max_nodes,
-                 poll_stale_after=600, node_stale_after=7200,
+                 shutdown_windows, server_calculator,
+                 min_nodes, max_nodes,
+                 poll_stale_after=600,
+                 boot_fail_after=1800,
+                 node_stale_after=7200,
                  node_setup_class=dispatch.ComputeNodeSetupActor,
                  node_shutdown_class=dispatch.ComputeNodeShutdownActor,
-                 node_actor_class=dispatch.ComputeNodeMonitorActor):
+                 node_actor_class=dispatch.ComputeNodeMonitorActor,
+                 max_total_price=0):
         super(NodeManagerDaemonActor, self).__init__()
         self._node_setup = node_setup_class
         self._node_shutdown = node_shutdown_class
@@ -112,9 +124,13 @@ class NodeManagerDaemonActor(actor_class):
         self._logger = logging.getLogger('arvnodeman.daemon')
         self._later = self.actor_ref.proxy()
         self.shutdown_windows = shutdown_windows
+        self.server_calculator = server_calculator
+        self.min_cloud_size = self.server_calculator.cheapest_size()
         self.min_nodes = min_nodes
         self.max_nodes = max_nodes
+        self.max_total_price = max_total_price
         self.poll_stale_after = poll_stale_after
+        self.boot_fail_after = boot_fail_after
         self.node_stale_after = node_stale_after
         self.last_polls = {}
         for poll_name in ['server_wishlist', 'arvados_nodes', 'cloud_nodes']:
@@ -148,11 +164,14 @@
             cloud_node=cloud_node,
             cloud_node_start_time=start_time,
             shutdown_timer=shutdown_timer,
+            cloud_fqdn_func=self._cloud_driver.node_fqdn,
             update_actor=self._cloud_updater,
             timer_actor=self._timer,
             arvados_node=None,
             poll_stale_after=self.poll_stale_after,
-            node_stale_after=self.node_stale_after).proxy()
+            node_stale_after=self.node_stale_after,
+            cloud_client=self._cloud_driver,
+            boot_fail_after=self.boot_fail_after).proxy()
         actor.subscribe(self._later.node_can_shutdown)
         self._cloud_nodes_actor.subscribe_to(cloud_node.id,
                                              actor.update_cloud_node)
@@ -173,8 +192,14 @@
                 self._pair_nodes(record, arv_rec.arvados_node)
                 break
         for key, record in self.cloud_nodes.orphans.iteritems():
+            if key in self.shutdowns:
+                try:
+                    self.shutdowns[key].stop().get()
+                except pykka.ActorDeadError:
+                    pass
+                del self.shutdowns[key]
             record.actor.stop()
-            self.shutdowns.pop(key, None)
+            record.cloud_node = None
 
     def update_arvados_nodes(self, nodelist):
         self._update_poll_time('arvados_nodes')
@@ -189,41 +214,95 @@
                     self._pair_nodes(cloud_rec, arv_node)
                     break
 
-    def _nodes_up(self):
-        return sum(len(nodelist) for nodelist in
-                   [self.cloud_nodes, self.booted, self.booting])
-
-    def _nodes_busy(self):
-        return sum(1 for idle in
-                   pykka.get_all(rec.actor.in_state('idle') for rec in
-                                 self.cloud_nodes.nodes.itervalues())
-                   if idle is False)
+    def _nodes_up(self, size):
+        up = 0
+        up += sum(1
+                  for c in self.booting.itervalues()
+                  if size is None or c.cloud_size.get().id == size.id)
+        up += sum(1
+                  for i in (self.booted, self.cloud_nodes.nodes)
+                  for c in i.itervalues()
+                  if size is None or c.cloud_node.size.id == size.id)
+        return up
+
+    def _total_price(self):
+        cost = 0
+        cost += sum(self.server_calculator.find_size(c.cloud_size.get().id).price
+                    for c in self.booting.itervalues())
+        cost += sum(self.server_calculator.find_size(c.cloud_node.size.id).price
+                    for i in (self.booted, self.cloud_nodes.nodes)
+                    for c in i.itervalues())
+        return cost
+
+    def _nodes_busy(self, size):
+        return sum(1 for busy in
+                   pykka.get_all(rec.actor.in_state('busy') for rec in
+                                 self.cloud_nodes.nodes.itervalues()
+                                 if rec.cloud_node.size.id == size.id)
+                   if busy)
+
+    def _nodes_missing(self, size):
+        return sum(1 for arv_node in
+                   pykka.get_all(rec.actor.arvados_node for rec in
+                                 self.cloud_nodes.nodes.itervalues()
+                                 if rec.cloud_node.size.id == size.id and rec.actor.cloud_node.get().id not in self.shutdowns)
+                   if arv_node and cnode.arvados_node_missing(arv_node, self.node_stale_after))
+
+    def _size_wishlist(self, size):
+        return sum(1 for c in self.last_wishlist if c.id == size.id)
+
+    def _size_shutdowns(self, size):
+        sh = 0
+        for c in self.shutdowns.itervalues():
+            try:
+                if c.cloud_node.get().size.id == size.id:
+                    sh += 1
+            except pykka.ActorDeadError:
+                pass
+        return sh
+
+    def _nodes_wanted(self, size):
+        total_up_count = self._nodes_up(None)
+        under_min = self.min_nodes - total_up_count
+        over_max = total_up_count - self.max_nodes
+        total_price = self._total_price()
 
-    def _nodes_wanted(self):
-        up_count = self._nodes_up()
-        over_max = up_count - self.max_nodes
         if over_max >= 0:
             return -over_max
+        elif under_min > 0 and size.id == self.min_cloud_size.id:
+            return under_min
+
+        up_count = self._nodes_up(size) - (self._size_shutdowns(size) +
+                                           self._nodes_busy(size) +
+                                           self._nodes_missing(size))
+
+        self._logger.debug("%s: idle nodes %i, wishlist size %i", size.name, up_count, self._size_wishlist(size))
+
+        wanted = self._size_wishlist(size) - up_count
+        if wanted > 0 and self.max_total_price and ((total_price + (size.price*wanted)) > self.max_total_price):
+            can_boot = int((self.max_total_price - total_price) / size.price)
+            if can_boot == 0:
+                self._logger.info("Not booting %s (price %s) because with it would exceed max_total_price of %s (current total_price is %s)",
                                  size.name, size.price, self.max_total_price, total_price)
+            return can_boot
         else:
-            up_count -= len(self.shutdowns) + self._nodes_busy()
-            return len(self.last_wishlist) - up_count
-
-    def _nodes_excess(self):
-        up_count = self._nodes_up() - len(self.shutdowns)
-        over_min = up_count - self.min_nodes
-        if over_min <= 0:
-            return over_min
-        else:
-            return up_count - self._nodes_busy() - len(self.last_wishlist)
+            return wanted
+
+    def _nodes_excess(self, size):
+        up_count = self._nodes_up(size) - self._size_shutdowns(size)
+        if size.id == self.min_cloud_size.id:
+            up_count -= self.min_nodes
+        return up_count - self._nodes_busy(size) - self._size_wishlist(size)
 
     def update_server_wishlist(self, wishlist):
         self._update_poll_time('server_wishlist')
         self.last_wishlist = wishlist
-        nodes_wanted = self._nodes_wanted()
-        if nodes_wanted > 0:
-            self._later.start_node()
-        elif (nodes_wanted < 0) and self.booting:
-            self._later.stop_booting_node()
+        for size in reversed(self.server_calculator.cloud_sizes):
+            nodes_wanted = self._nodes_wanted(size)
+            if nodes_wanted > 0:
+                self._later.start_node(size)
+            elif (nodes_wanted < 0) and self.booting:
+                self._later.stop_booting_node(size)
 
     def _check_poll_freshness(orig_func):
         """Decorator to inhibit a method when poll information is stale.
@@ -243,12 +322,11 @@
         return wrapper
 
     @_check_poll_freshness
-    def start_node(self):
-        nodes_wanted = self._nodes_wanted()
+    def start_node(self, cloud_size):
+        nodes_wanted = self._nodes_wanted(cloud_size)
        if nodes_wanted < 1:
             return None
         arvados_node = self.arvados_nodes.find_stale_node(self.node_stale_after)
-        cloud_size = self.last_wishlist[nodes_wanted - 1]
         self._logger.info("Want %s more nodes. Booting a %s node.",
                           nodes_wanted, cloud_size.name)
         new_setup = self._node_setup.start(
@@ -263,54 +341,68 @@
             time.time())
         new_setup.subscribe(self._later.node_up)
         if nodes_wanted > 1:
-            self._later.start_node()
+            self._later.start_node(cloud_size)
 
     def _get_actor_attrs(self, actor, *attr_names):
         return pykka.get_all([getattr(actor, name) for name in attr_names])
 
     def node_up(self, setup_proxy):
-        cloud_node, arvados_node = self._get_actor_attrs(
-            setup_proxy, 'cloud_node', 'arvados_node')
+        cloud_node = setup_proxy.cloud_node.get()
         del self.booting[setup_proxy.actor_ref.actor_urn]
         setup_proxy.stop()
         record = self.cloud_nodes.get(cloud_node.id)
         if record is None:
             record = self._new_node(cloud_node)
             self.booted[cloud_node.id] = record
-        self._pair_nodes(record, arvados_node)
+        self._timer.schedule(time.time() + self.boot_fail_after,
+                             self._later.shutdown_unpaired_node, cloud_node.id)
 
     @_check_poll_freshness
-    def stop_booting_node(self):
-        nodes_excess = self._nodes_excess()
+    def stop_booting_node(self, size):
+        nodes_excess = self._nodes_excess(size)
         if (nodes_excess < 1) or not self.booting:
             return None
         for key, node in self.booting.iteritems():
-            node.stop_if_no_cloud_node().get()
-            if not node.actor_ref.is_alive():
+            if node.cloud_size.get().id == size.id and node.stop_if_no_cloud_node().get():
                 del self.booting[key]
                 if nodes_excess > 1:
-                    self._later.stop_booting_node()
+                    self._later.stop_booting_node(size)
                 break
 
-    @_check_poll_freshness
-    def node_can_shutdown(self, node_actor):
-        if self._nodes_excess() < 1:
-            return None
+    def _begin_node_shutdown(self, node_actor, cancellable):
         cloud_node_id = node_actor.cloud_node.get().id
         if cloud_node_id in self.shutdowns:
             return None
         shutdown = self._node_shutdown.start(
             timer_actor=self._timer, cloud_client=self._new_cloud(),
-            node_monitor=node_actor.actor_ref).proxy()
+            arvados_client=self._new_arvados(),
+            node_monitor=node_actor.actor_ref, cancellable=cancellable).proxy()
         self.shutdowns[cloud_node_id] = shutdown
         shutdown.subscribe(self._later.node_finished_shutdown)
 
+    @_check_poll_freshness
+    def node_can_shutdown(self, node_actor):
+        if self._nodes_excess(node_actor.cloud_node.get().size) > 0:
+            self._begin_node_shutdown(node_actor, cancellable=True)
+
+    def shutdown_unpaired_node(self, cloud_node_id):
+        for record_dict in [self.cloud_nodes, self.booted]:
+            if cloud_node_id in record_dict:
+                record = record_dict[cloud_node_id]
+                break
+        else:
+            return None
+        if not record.actor.in_state('idle', 'busy').get():
+            self._begin_node_shutdown(record.actor, cancellable=False)
+
     def node_finished_shutdown(self, shutdown_actor):
-        success, cloud_node = self._get_actor_attrs(shutdown_actor, 'success',
-                                                    'cloud_node')
+        cloud_node, success, cancel_reason = self._get_actor_attrs(
+            shutdown_actor, 'cloud_node', 'success', 'cancel_reason')
         shutdown_actor.stop()
         cloud_node_id = cloud_node.id
         if not success:
+            if cancel_reason == self._node_shutdown.NODE_BROKEN:
+                self.cloud_nodes.blacklist(cloud_node_id)
             del self.shutdowns[cloud_node_id]
         elif cloud_node_id in self.booted:
             self.booted.pop(cloud_node_id).actor.stop()
@@ -319,12 +411,14 @@
     def shutdown(self):
         self._logger.info("Shutting down after signal.")
         self.poll_stale_after = -1  # Inhibit starting/stopping nodes
-        for bootnode in self.booting.itervalues():
-            bootnode.stop_if_no_cloud_node()
+        setup_stops = {key: node.stop_if_no_cloud_node()
+                       for key, node in self.booting.iteritems()}
+        self.booting = {key: self.booting[key]
+                        for key in setup_stops if not setup_stops[key].get()}
         self._later.await_shutdown()
 
     def await_shutdown(self):
-        if any(node.actor_ref.is_alive() for node in self.booting.itervalues()):
+        if self.booting:
             self._timer.schedule(time.time() + 1, self._later.await_shutdown)
         else:
             self.stop()
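
The first hunk replaces hand-written dict passthroughs in _BaseNodeTracker with a small metaprogramming loop: because a class body's local namespace becomes the class's attribute dict, assigning into locals() there installs each generated wrapper as an ordinary method (CPython behavior). A minimal standalone sketch of the same pattern, with a hypothetical Tracker class standing in for _BaseNodeTracker:

    import functools

    class Tracker(object):
        # Hypothetical stand-in for _BaseNodeTracker above, not Arvados source.
        def __init__(self):
            self.nodes = {}

        # Build a wrapper that forwards one dict method to self.nodes.
        # Only __name__/__doc__ are copied because built-in methods may lack
        # the other attributes functools.wraps copies by default.
        def _proxy_method(name):
            method = getattr(dict, name)
            @functools.wraps(method, ('__name__', '__doc__'))
            def wrapper(self, *args, **kwargs):
                return method(self.nodes, *args, **kwargs)
            return wrapper

        # Running inside the class body, these assignments become methods.
        for _method_name in ['__contains__', '__getitem__', '__len__', 'get']:
            locals()[_method_name] = _proxy_method(_method_name)

    t = Tracker()
    t.nodes['n1'] = 'record'
    assert 'n1' in t            # __contains__ proxied to t.nodes
    assert t['n1'] == 'record'  # __getitem__ proxied
    assert len(t) == 1          # __len__ proxied
    assert t.get('nope') is None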
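
In the rewritten _nodes_wanted, the max_total_price check caps how many nodes of a given size may boot: if satisfying the wishlist shortfall would push total spending past the ceiling, the daemon boots only as many nodes as the remaining budget covers. A worked example of that arithmetic, with illustrative figures that do not come from the source:

    max_total_price = 10.00  # configured spending ceiling (illustrative)
    total_price = 8.50       # current spend, as _total_price() would report
    size_price = 0.60        # unit price of the size under consideration
    wanted = 5               # wishlist entries not yet satisfied

    # Mirrors: (total_price + (size.price*wanted)) > self.max_total_price
    if wanted > 0 and max_total_price and (total_price + size_price * wanted) > max_total_price:
        # Boot only what the remaining budget allows.
        wanted = int((max_total_price - total_price) / size_price)

    print(wanted)  # 2 -- (10.00 - 8.50) / 0.60 = 2.5, truncated to 2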
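
The new shutdown_unpaired_node relies on Python's for/else: the else branch runs only when the loop finishes without break, giving a compact "not found" path when searching several containers in order. A small sketch of the same lookup shape, using plain dicts as hypothetical stand-ins for the daemon's record trackers:

    def find_record(cloud_node_id, cloud_nodes, booted):
        for record_dict in [cloud_nodes, booted]:
            if cloud_node_id in record_dict:
                record = record_dict[cloud_node_id]
                break
        else:  # no break: the id was in neither container
            return None
        return record

    assert find_record('n1', {'n1': 'rec'}, {}) == 'rec'
    assert find_record('n2', {'n1': 'rec'}, {'n3': 'other'}) is None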