Merge branch '8697-ruby187-compat'
[arvados.git] / services / nodemanager / arvnodeman / daemon.py
index 8552f2a922872837c29dff323e2814d922fda3f9..11204409c03cd1894ac4128f1bfc84e3a4767652 100644
@@ -25,6 +25,7 @@ class _BaseNodeTracker(object):
     def __init__(self):
         self.nodes = {}
         self.orphans = {}
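+        # Cloud node IDs to ignore in future listings; populated via blacklist().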
+        self._blacklist = set()
 
     # Proxy the methods listed below to self.nodes.
     def _proxy_method(name):
@@ -43,6 +44,9 @@ class _BaseNodeTracker(object):
     def add(self, record):
         self.nodes[self.record_key(record)] = record
 
+    def blacklist(self, key):
+        self._blacklist.add(key)
+
     def update_record(self, key, item):
         setattr(self.nodes[key], self.RECORD_ATTR, item)
 
@@ -50,7 +54,9 @@ class _BaseNodeTracker(object):
         unseen = set(self.nodes.iterkeys())
         for item in response:
             key = self.item_key(item)
-            if key in unseen:
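+            # Skip blacklisted nodes so they are neither updated nor re-added.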
+            if key in self._blacklist:
+                continue
+            elif key in unseen:
                 unseen.remove(key)
                 self.update_record(key, item)
             else:
@@ -97,13 +103,15 @@ class NodeManagerDaemonActor(actor_class):
     def __init__(self, server_wishlist_actor, arvados_nodes_actor,
                  cloud_nodes_actor, cloud_update_actor, timer_actor,
                  arvados_factory, cloud_factory,
-                 shutdown_windows, min_size, min_nodes, max_nodes,
+                 shutdown_windows, server_calculator,
+                 min_nodes, max_nodes,
                  poll_stale_after=600,
                  boot_fail_after=1800,
                  node_stale_after=7200,
                  node_setup_class=dispatch.ComputeNodeSetupActor,
                  node_shutdown_class=dispatch.ComputeNodeShutdownActor,
-                 node_actor_class=dispatch.ComputeNodeMonitorActor):
+                 node_actor_class=dispatch.ComputeNodeMonitorActor,
+                 max_total_price=0):
         super(NodeManagerDaemonActor, self).__init__()
         self._node_setup = node_setup_class
         self._node_shutdown = node_shutdown_class
@@ -113,12 +121,13 @@ class NodeManagerDaemonActor(actor_class):
         self._new_arvados = arvados_factory
         self._new_cloud = cloud_factory
         self._cloud_driver = self._new_cloud()
-        self._logger = logging.getLogger('arvnodeman.daemon')
-        self._later = self.actor_ref.proxy()
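+        # tell_proxy() sends one-way messages, so the daemon can message
+        # itself without blocking on a reply.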
+        self._later = self.actor_ref.tell_proxy()
         self.shutdown_windows = shutdown_windows
-        self.min_cloud_size = min_size
+        self.server_calculator = server_calculator
+        self.min_cloud_size = self.server_calculator.cheapest_size()
         self.min_nodes = min_nodes
         self.max_nodes = max_nodes
+        self.max_total_price = max_total_price
         self.poll_stale_after = poll_stale_after
         self.boot_fail_after = boot_fail_after
         self.node_stale_after = node_stale_after
@@ -133,14 +142,18 @@ class NodeManagerDaemonActor(actor_class):
         self.booting = {}       # Actor IDs to ComputeNodeSetupActors
         self.booted = {}        # Cloud node IDs to _ComputeNodeRecords
         self.shutdowns = {}     # Cloud node IDs to ComputeNodeShutdownActors
-        self._logger.debug("Daemon initialized")
+        self.sizes_booting_shutdown = {} # Actor IDs or Cloud node IDs to node size
+
+    def on_start(self):
+        self._logger = logging.getLogger("%s.%s" % (self.__class__.__name__, self.actor_urn[33:]))
+        self._logger.debug("Daemon started")
 
     def _update_poll_time(self, poll_key):
         self.last_polls[poll_key] = time.time()
 
     def _pair_nodes(self, node_record, arvados_node):
-        self._logger.info("Cloud node %s has associated with Arvados node %s",
-                          node_record.cloud_node.id, arvados_node['uuid'])
+        self._logger.info("Cloud node %s is now paired with Arvados node %s",
+                          node_record.cloud_node.name, arvados_node['uuid'])
         self._arvados_nodes_actor.subscribe_to(
             arvados_node['uuid'], node_record.actor.update_arvados_node)
         node_record.arvados_node = arvados_node
@@ -159,11 +172,14 @@ class NodeManagerDaemonActor(actor_class):
             timer_actor=self._timer,
             arvados_node=None,
             poll_stale_after=self.poll_stale_after,
-            node_stale_after=self.node_stale_after).proxy()
-        actor.subscribe(self._later.node_can_shutdown)
+            node_stale_after=self.node_stale_after,
+            cloud_client=self._cloud_driver,
+            boot_fail_after=self.boot_fail_after)
+        actorTell = actor.tell_proxy()
+        actorTell.subscribe(self._later.node_can_shutdown)
         self._cloud_nodes_actor.subscribe_to(cloud_node.id,
-                                             actor.update_cloud_node)
-        record = _ComputeNodeRecord(actor, cloud_node)
+                                             actorTell.update_cloud_node)
+        record = _ComputeNodeRecord(actor.proxy(), cloud_node)
         return record
 
     def update_cloud_nodes(self, nodelist):
@@ -180,9 +196,15 @@ class NodeManagerDaemonActor(actor_class):
                     self._pair_nodes(record, arv_rec.arvados_node)
                     break
         for key, record in self.cloud_nodes.orphans.iteritems():
+            if key in self.shutdowns:
+                try:
+                    self.shutdowns[key].stop().get()
+                except pykka.ActorDeadError:
+                    pass
+                del self.shutdowns[key]
+                del self.sizes_booting_shutdown[key]
             record.actor.stop()
             record.cloud_node = None
-            self.shutdowns.pop(key, None)
 
     def update_arvados_nodes(self, nodelist):
         self._update_poll_time('arvados_nodes')
@@ -197,44 +219,127 @@ class NodeManagerDaemonActor(actor_class):
                     self._pair_nodes(cloud_rec, arv_node)
                     break
 
-    def _nodes_up(self):
-        return sum(len(nodelist) for nodelist in
-                   [self.cloud_nodes, self.booted, self.booting])
-
-    def _nodes_busy(self):
-        return sum(1 for idle in
-                   pykka.get_all(rec.actor.in_state('idle') for rec in
-                                 self.cloud_nodes.nodes.itervalues())
-                   if idle is False)
+    def _nodes_booting(self, size):
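+        # Count nodes still in setup plus nodes that have booted but are not
+        # yet seen in a cloud listing; size=None matches every size.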
+        s = sum(1
+                for c in self.booting.iterkeys()
+                if size is None or self.sizes_booting_shutdown[c].id == size.id)
+        s += sum(1
+                 for c in self.booted.itervalues()
+                 if size is None or c.cloud_node.size.id == size.id)
+        return s
+
+    def _nodes_unpaired(self, size):
+        return sum(1
+                   for c in self.cloud_nodes.unpaired()
+                   if size is None or c.cloud_node.size.id == size.id)
+
+    def _nodes_booted(self, size):
+        return sum(1
+                   for c in self.cloud_nodes.nodes.itervalues()
+                   if size is None or c.cloud_node.size.id == size.id)
+
+    def _nodes_down(self, size):
+        # Make sure to iterate over self.cloud_nodes because what we're
+        # counting here are compute nodes that are reported by the cloud
+        # provider but are considered "down" by Arvados.
+        return sum(1 for down in
+                   pykka.get_all(rec.actor.in_state('down') for rec in
+                                 self.cloud_nodes.nodes.itervalues()
+                                 if size is None or rec.cloud_node.size.id == size.id)
+                   if down)
+
+    def _nodes_up(self, size):
+        up = (self._nodes_booting(size) + self._nodes_booted(size)) - self._nodes_down(size)
+        return up
+
+    def _total_price(self):
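+        # Sum the prices of every node we are paying for: booting, booted,
+        # and listed by the cloud provider.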
+        cost = 0
+        cost += sum(self.server_calculator.find_size(self.sizes_booting_shutdown[c].id).price
+                    for c in self.booting.iterkeys())
+        cost += sum(self.server_calculator.find_size(c.cloud_node.size.id).price
+                    for i in (self.booted, self.cloud_nodes.nodes)
+                    for c in i.itervalues())
+        return cost
+
+    def _nodes_busy(self, size):
+        return sum(1 for busy in
+                   pykka.get_all(rec.actor.in_state('busy') for rec in
+                                 self.cloud_nodes.nodes.itervalues()
+                                 if rec.cloud_node.size.id == size.id)
+                   if busy)
+
+    def _nodes_missing(self, size):
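+        # "Missing" nodes are still listed by the cloud and not already
+        # shutting down, but their Arvados records have gone stale.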
+        return sum(1 for arv_node in
+                   pykka.get_all(rec.actor.arvados_node for rec in
+                                 self.cloud_nodes.nodes.itervalues()
+                                 if rec.cloud_node.size.id == size.id and rec.actor.cloud_node.get().id not in self.shutdowns)
+                   if arv_node and cnode.arvados_node_missing(arv_node, self.node_stale_after))
+
+    def _size_wishlist(self, size):
+        return sum(1 for c in self.last_wishlist if c.id == size.id)
+
+    def _size_shutdowns(self, size):
+        sh = 0
+        for c in self.shutdowns.iterkeys():
+            try:
+                if self.sizes_booting_shutdown[c].id == size.id:
+                    sh += 1
+            except KeyError:
+                pass
+        return sh
+
+    def _nodes_wanted(self, size):
+        total_up_count = self._nodes_up(None)
+        under_min = self.min_nodes - total_up_count
+        over_max = total_up_count - self.max_nodes
+        total_price = self._total_price()
 
-    def _nodes_wanted(self):
-        up_count = self._nodes_up()
-        under_min = self.min_nodes - up_count
-        over_max = up_count - self.max_nodes
         if over_max >= 0:
             return -over_max
-        elif under_min > 0:
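+        # Only the cheapest size is used to satisfy the min_nodes floor.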
+        elif under_min > 0 and size.id == self.min_cloud_size.id:
             return under_min
+
+        booting_count = self._nodes_booting(size) + self._nodes_unpaired(size)
+        shutdown_count = self._size_shutdowns(size)
+        busy_count = self._nodes_busy(size)
+        idle_count = self._nodes_up(size) - (busy_count + self._nodes_missing(size))
+
+        self._logger.info("%s: wishlist %i, up %i (booting %i, idle %i, busy %i), shutting down %i", size.name,
+                          self._size_wishlist(size),
+                          idle_count + busy_count,
+                          booting_count,
+                          idle_count - booting_count,
+                          busy_count,
+                          shutdown_count)
+
+        wanted = self._size_wishlist(size) - idle_count
+        if wanted > 0 and self.max_total_price and (total_price + size.price * wanted) > self.max_total_price:
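+            # Cap how many we boot so the projected total stays within max_total_price;
+            # e.g. max_total_price=10.0, total_price=8.0, size.price=1.5 gives int(2.0 / 1.5) = 1.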
+            can_boot = int((self.max_total_price - total_price) / size.price)
+            if can_boot == 0:
+                self._logger.info("Not booting %s (price %s) because with it would exceed max_total_price of %s (current total_price is %s)",
+                                  size.name, size.price, self.max_total_price, total_price)
+            return can_boot
         else:
-            up_count -= len(self.shutdowns) + self._nodes_busy()
-            return len(self.last_wishlist) - up_count
-
-    def _nodes_excess(self):
-        up_count = self._nodes_up() - len(self.shutdowns)
-        over_min = up_count - self.min_nodes
-        if over_min <= 0:
-            return over_min
-        else:
-            return up_count - self._nodes_busy() - len(self.last_wishlist)
+            return wanted
+
+    def _nodes_excess(self, size):
+        up_count = (self._nodes_booting(size) + self._nodes_booted(size)) - self._size_shutdowns(size)
+        if size.id == self.min_cloud_size.id:
+            up_count -= self.min_nodes
+        return up_count - self._nodes_busy(size) - self._size_wishlist(size)
 
     def update_server_wishlist(self, wishlist):
         self._update_poll_time('server_wishlist')
         self.last_wishlist = wishlist
-        nodes_wanted = self._nodes_wanted()
-        if nodes_wanted > 0:
-            self._later.start_node()
-        elif (nodes_wanted < 0) and self.booting:
-            self._later.stop_booting_node()
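+        # Evaluate each size independently; assuming cloud_sizes is sorted
+        # cheapest-first, reversed() visits the most expensive sizes first.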
+        for size in reversed(self.server_calculator.cloud_sizes):
+            try:
+                nodes_wanted = self._nodes_wanted(size)
+                if nodes_wanted > 0:
+                    self._later.start_node(size)
+                elif (nodes_wanted < 0) and self.booting:
+                    self._later.stop_booting_node(size)
+            except Exception:
+                self._logger.exception("Error calculating nodes wanted for size %s", size)
 
     def _check_poll_freshness(orig_func):
         """Decorator to inhibit a method when poll information is stale.
@@ -254,16 +359,12 @@ class NodeManagerDaemonActor(actor_class):
         return wrapper
 
     @_check_poll_freshness
-    def start_node(self):
-        nodes_wanted = self._nodes_wanted()
+    def start_node(self, cloud_size):
+        nodes_wanted = self._nodes_wanted(cloud_size)
         if nodes_wanted < 1:
             return None
         arvados_node = self.arvados_nodes.find_stale_node(self.node_stale_after)
-        try:
-            cloud_size = self.last_wishlist[self._nodes_up()]
-        except IndexError:
-            cloud_size = self.min_cloud_size
-        self._logger.info("Want %s more nodes.  Booting a %s node.",
+        self._logger.info("Want %i more %s nodes.  Booting a node.",
                           nodes_wanted, cloud_size.name)
         new_setup = self._node_setup.start(
             timer_actor=self._timer,
@@ -272,12 +373,14 @@ class NodeManagerDaemonActor(actor_class):
             cloud_client=self._new_cloud(),
             cloud_size=cloud_size).proxy()
         self.booting[new_setup.actor_ref.actor_urn] = new_setup
+        self.sizes_booting_shutdown[new_setup.actor_ref.actor_urn] = cloud_size
+
         if arvados_node is not None:
             self.arvados_nodes[arvados_node['uuid']].assignment_time = (
                 time.time())
         new_setup.subscribe(self._later.node_up)
         if nodes_wanted > 1:
-            self._later.start_node()
+            self._later.start_node(cloud_size)
 
     def _get_actor_attrs(self, actor, *attr_names):
         return pykka.get_all([getattr(actor, name) for name in attr_names])
@@ -285,39 +388,47 @@ class NodeManagerDaemonActor(actor_class):
     def node_up(self, setup_proxy):
         cloud_node = setup_proxy.cloud_node.get()
         del self.booting[setup_proxy.actor_ref.actor_urn]
+        del self.sizes_booting_shutdown[setup_proxy.actor_ref.actor_urn]
+
         setup_proxy.stop()
-        record = self.cloud_nodes.get(cloud_node.id)
-        if record is None:
-            record = self._new_node(cloud_node)
-            self.booted[cloud_node.id] = record
-        self._timer.schedule(time.time() + self.boot_fail_after,
-                             self._later.shutdown_unpaired_node, cloud_node.id)
+        if cloud_node is not None:
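+            # Setup may have failed before a node was created; nothing to track then.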
+            record = self.cloud_nodes.get(cloud_node.id)
+            if record is None:
+                record = self._new_node(cloud_node)
+                self.booted[cloud_node.id] = record
+            self._timer.schedule(time.time() + self.boot_fail_after,
+                                 self._later.shutdown_unpaired_node, cloud_node.id)
 
     @_check_poll_freshness
-    def stop_booting_node(self):
-        nodes_excess = self._nodes_excess()
+    def stop_booting_node(self, size):
+        nodes_excess = self._nodes_excess(size)
         if (nodes_excess < 1) or not self.booting:
             return None
         for key, node in self.booting.iteritems():
-            if node.stop_if_no_cloud_node().get():
+            if node.cloud_size.get().id == size.id and node.stop_if_no_cloud_node().get():
                 del self.booting[key]
+                del self.sizes_booting_shutdown[key]
+
                 if nodes_excess > 1:
-                    self._later.stop_booting_node()
+                    self._later.stop_booting_node(size)
                 break
 
     def _begin_node_shutdown(self, node_actor, cancellable):
-        cloud_node_id = node_actor.cloud_node.get().id
+        cloud_node_obj = node_actor.cloud_node.get()
+        cloud_node_id = cloud_node_obj.id
         if cloud_node_id in self.shutdowns:
             return None
         shutdown = self._node_shutdown.start(
             timer_actor=self._timer, cloud_client=self._new_cloud(),
-            node_monitor=node_actor.actor_ref, cancellable=cancellable).proxy()
-        self.shutdowns[cloud_node_id] = shutdown
-        shutdown.subscribe(self._later.node_finished_shutdown)
+            arvados_client=self._new_arvados(),
+            node_monitor=node_actor.actor_ref, cancellable=cancellable)
+        self.shutdowns[cloud_node_id] = shutdown.proxy()
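+        # Record the size so per-size counts stay accurate during shutdown.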
+        self.sizes_booting_shutdown[cloud_node_id] = cloud_node_obj.size
+        shutdown.tell_proxy().subscribe(self._later.node_finished_shutdown)
 
     @_check_poll_freshness
     def node_can_shutdown(self, node_actor):
-        if self._nodes_excess() > 0:
+        if self._nodes_excess(node_actor.cloud_node.get().size) > 0:
             self._begin_node_shutdown(node_actor, cancellable=True)
 
     def shutdown_unpaired_node(self, cloud_node_id):
@@ -331,15 +442,17 @@ class NodeManagerDaemonActor(actor_class):
             self._begin_node_shutdown(record.actor, cancellable=False)
 
     def node_finished_shutdown(self, shutdown_actor):
-        success, cloud_node = self._get_actor_attrs(shutdown_actor, 'success',
-                                                    'cloud_node')
+        cloud_node, success, cancel_reason = self._get_actor_attrs(
+            shutdown_actor, 'cloud_node', 'success', 'cancel_reason')
         shutdown_actor.stop()
         cloud_node_id = cloud_node.id
         if not success:
-            del self.shutdowns[cloud_node_id]
+            if cancel_reason == self._node_shutdown.NODE_BROKEN:
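+                # Shutdown failed because the node is broken; blacklist it so
+                # cloud listings do not resurrect its record.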
+                self.cloud_nodes.blacklist(cloud_node_id)
         elif cloud_node_id in self.booted:
             self.booted.pop(cloud_node_id).actor.stop()
-            del self.shutdowns[cloud_node_id]
+        del self.shutdowns[cloud_node_id]
+        del self.sizes_booting_shutdown[cloud_node_id]
 
     def shutdown(self):
         self._logger.info("Shutting down after signal.")