cloud_size=get_cloud_size,
actor_ref=mock_actor)
mock_actor.proxy.return_value = mock_proxy
+ mock_actor.tell_proxy.return_value = mock_proxy
self.last_setup = mock_proxy
return mock_actor
self.cloud_factory().node_start_time.return_value = time.time()
self.cloud_updates = mock.MagicMock(name='updates_mock')
self.timer = testutil.MockTimer(deliver_immediately=False)
+ self.cloud_factory().node_id.side_effect = lambda node: node.id
self.node_setup = mock.MagicMock(name='setup_mock')
self.node_setup.start.side_effect = self.mock_node_start
def test_node_pairing_after_arvados_update(self):
cloud_node = testutil.cloud_node_mock(2)
self.make_daemon([cloud_node],
- [testutil.arvados_node_mock(2, ip_address=None)])
+ [testutil.arvados_node_mock(1, ip_address=None)])
arv_node = testutil.arvados_node_mock(2)
self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
mock_shutdown = self.node_shutdown.start(node_monitor=mock_node_monitor)
self.daemon.shutdowns.get()[cloud_nodes[1].id] = mock_shutdown.proxy()
+ self.daemon.sizes_booting_shutdown.get()[cloud_nodes[1].id] = size
self.assertEqual(2, self.alive_monitor_count())
for mon_ref in self.monitor_list():
self.last_setup.arvados_node.get.return_value = arv_node
return self.last_setup
- def test_no_new_node_when_booted_node_not_usable(self):
+ def test_new_node_when_booted_node_not_usable(self):
+ # A node that has booted but whose Arvados record reports
+ # crunch_worker_state 'down' is not usable, so it should not count
+ # toward satisfying the wishlist: the daemon must start a second
+ # node setup rather than stopping at the first.
cloud_node = testutil.cloud_node_mock(4)
arv_node = testutil.arvados_node_mock(4, crunch_worker_state='down')
setup = self.start_node_boot(cloud_node, arv_node)
self.daemon.update_server_wishlist(
[testutil.MockSize(1)]).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
- self.assertEqual(1, self.node_setup.start.call_count)
+ self.assertEqual(2, self.node_setup.start.call_count)
def test_no_duplication_when_booting_node_listed_fast(self):
# Test that we don't start two ComputeNodeMonitorActors when
(testutil.MockSize(2), {"cores":2})]
self.make_daemon(want_sizes=[small, small, small, big],
avail_sizes=avail_sizes, max_nodes=4)
+
+ # the daemon runs in another thread, so we need to wait and see
+ # if it does all the work we're expecting it to do before stopping it.
self.busywait(lambda: self.node_setup.start.call_count == 4)
booting = self.daemon.booting.get(self.TIMEOUT)
self.stop_proxy(self.daemon)
(testutil.MockSize(2), {"cores":2})]
self.make_daemon(want_sizes=[small, small, small, big],
avail_sizes=avail_sizes, max_nodes=3)
+
+ # the daemon runs in another thread, so we need to wait and see
+ # if it does all the work we're expecting it to do before stopping it.
self.busywait(lambda: self.node_setup.start.call_count == 3)
booting = self.daemon.booting.get(self.TIMEOUT)
self.stop_proxy(self.daemon)
sizecounts = {a[0].id: 0 for a in avail_sizes}
for b in booting.itervalues():
sizecounts[b.cloud_size.get().id] += 1
- logging.info(sizecounts)
self.assertEqual(2, sizecounts[small.id])
self.assertEqual(1, sizecounts[big.id])
avail_sizes=avail_sizes,
max_nodes=4,
max_total_price=4)
+ # the daemon runs in another thread, so we need to wait and see
+ # if it does all the work we're expecting it to do before stopping it.
self.busywait(lambda: self.node_setup.start.call_count == 3)
booting = self.daemon.booting.get()
self.stop_proxy(self.daemon)
- self.assertEqual(3, self.node_setup.start.call_count)
+
sizecounts = {a[0].id: 0 for a in avail_sizes}
for b in booting.itervalues():
sizecounts[b.cloud_size.get().id] += 1
logging.info(sizecounts)
- # The way the update_server_wishlist() works effectively results in a
- # round-robin creation of one node of each size in the wishlist
+
+ # Booting 3 small nodes and not booting a big node would also partially
+ # satisfy the wishlist and come in under the price cap, however the way
+ # the update_server_wishlist() currently works effectively results in a
+ # round-robin creation of one node of each size in the wishlist, so
+ # test for that.
self.assertEqual(2, sizecounts[small.id])
self.assertEqual(1, sizecounts[big.id])
+
+ @mock.patch("arvnodeman.daemon.NodeManagerDaemonActor._resume_node")
+ def test_resume_drained_nodes(self, resume_node):
+ # A cloud node paired with an Arvados record whose slurm_state is
+ # "down" (i.e. drained) should be brought back: the daemon is
+ # expected to call _resume_node with its record for that cloud node.
+ cloud_node = testutil.cloud_node_mock(1)
+ arv_node = testutil.arvados_node_mock(1, info={"ec2_instance_id": "1", "slurm_state": "down"})
+ self.make_daemon([cloud_node], [arv_node])
+ # NOTE(review): .values()[0] assumes a Python 2 dict view — confirm
+ # this module still targets Python 2 (itervalues() elsewhere suggests so).
+ resume_node.assert_called_with(self.daemon.cloud_nodes.get(self.TIMEOUT).nodes.values()[0])
+ self.stop_proxy(self.daemon)
+
+ @mock.patch("arvnodeman.daemon.NodeManagerDaemonActor._resume_node")
+ def test_no_resume_shutdown_nodes(self, resume_node):
+ # Even though the Arvados record reports slurm_state "down", a node
+ # that already has a shutdown in progress must NOT be resumed.
+ cloud_node = testutil.cloud_node_mock(1)
+ arv_node = testutil.arvados_node_mock(1, info={"ec2_instance_id": "1", "slurm_state": "down"})
+
+ self.make_daemon([cloud_node], [])
+
+ # Register a mock shutdown actor for this cloud node before the
+ # Arvados update arrives, so the daemon sees it as shutting down.
+ self.node_shutdown = mock.MagicMock(name='shutdown_mock')
+ self.daemon.shutdowns.get(self.TIMEOUT)[cloud_node.id] = self.node_shutdown
+
+ self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
+ self.stop_proxy(self.daemon)
+ resume_node.assert_not_called()