cloud_size=get_cloud_size,
actor_ref=mock_actor)
mock_actor.proxy.return_value = mock_proxy
+ mock_actor.tell_proxy.return_value = mock_proxy
self.last_setup = mock_proxy
return mock_actor
def make_daemon(self, cloud_nodes=[], arvados_nodes=[], want_sizes=[],
avail_sizes=[(testutil.MockSize(1), {"cores": 1})],
min_nodes=0, max_nodes=8,
- shutdown_windows=[54, 5, 1]):
+ shutdown_windows=[54, 5, 1],
+ max_total_price=None):
for name in ['cloud_nodes', 'arvados_nodes', 'server_wishlist']:
setattr(self, name + '_poller', mock.MagicMock(name=name + '_mock'))
self.arv_factory = mock.MagicMock(name='arvados_mock')
self.cloud_factory().node_start_time.return_value = time.time()
self.cloud_updates = mock.MagicMock(name='updates_mock')
self.timer = testutil.MockTimer(deliver_immediately=False)
+ self.cloud_factory().node_id.side_effect = lambda node: node.id
self.node_setup = mock.MagicMock(name='setup_mock')
self.node_setup.start.side_effect = self.mock_node_start
self.arv_factory, self.cloud_factory,
shutdown_windows, ServerCalculator(avail_sizes),
min_nodes, max_nodes, 600, 1800, 3600,
- self.node_setup, self.node_shutdown).proxy()
+ self.node_setup, self.node_shutdown,
+ max_total_price=max_total_price).proxy()
if cloud_nodes is not None:
self.daemon.update_cloud_nodes(cloud_nodes).get(self.TIMEOUT)
if arvados_nodes is not None:
def test_node_pairing_after_arvados_update(self):
cloud_node = testutil.cloud_node_mock(2)
self.make_daemon([cloud_node],
- [testutil.arvados_node_mock(2, ip_address=None)])
+ [testutil.arvados_node_mock(1, ip_address=None)])
arv_node = testutil.arvados_node_mock(2)
self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
mock_shutdown = self.node_shutdown.start(node_monitor=mock_node_monitor)
self.daemon.shutdowns.get()[cloud_nodes[1].id] = mock_shutdown.proxy()
+ self.daemon.sizes_booting_shutdown.get()[cloud_nodes[1].id] = size
self.assertEqual(2, self.alive_monitor_count())
for mon_ref in self.monitor_list():
self.last_setup.arvados_node.get.return_value = arv_node
return self.last_setup
- def test_no_new_node_when_booted_node_not_usable(self):
+ # A booted node whose Arvados record reports crunch_worker_state='down'
+ # does not satisfy the wishlist, so the daemon should start a second
+ # setup (the original boot plus a replacement).
+ def test_new_node_when_booted_node_not_usable(self):
cloud_node = testutil.cloud_node_mock(4)
arv_node = testutil.arvados_node_mock(4, crunch_worker_state='down')
setup = self.start_node_boot(cloud_node, arv_node)
self.daemon.update_server_wishlist(
[testutil.MockSize(1)]).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
- self.assertEqual(1, self.node_setup.start.call_count)
+ # Expect two setup starts: the unusable node's boot and its replacement.
+ self.assertEqual(2, self.node_setup.start.call_count)
def test_no_duplication_when_booting_node_listed_fast(self):
# Test that we don't start two ComputeNodeMonitorActors when
self.stop_proxy(self.daemon)
self.assertEqual(1, self.last_shutdown.stop.call_count)
+ def busywait(self, f):
+ # Poll predicate f up to 10 times, 0.1s apart, to give the daemon
+ # actor (which runs in another thread) time to finish its work,
+ # then assert that f() eventually became true.
+ n = 0
+ while not f() and n < 10:
+ time.sleep(.1)
+ n += 1
+ self.assertTrue(f())
+
+ # With max_nodes=4, every entry in the four-item wishlist should get a
+ # node setup: three small and one big.
def test_node_create_two_sizes(self):
small = testutil.MockSize(1)
big = testutil.MockSize(2)
avail_sizes = [(testutil.MockSize(1), {"cores":1}),
(testutil.MockSize(2), {"cores":2})]
- self.make_daemon(want_sizes=[small, small, big],
- avail_sizes=avail_sizes)
- booting = self.daemon.booting.get()
+ self.make_daemon(want_sizes=[small, small, small, big],
+ avail_sizes=avail_sizes, max_nodes=4)
+
+ # the daemon runs in another thread, so we need to wait and see
+ # if it does all the work we're expecting it to do before stopping it.
+ self.busywait(lambda: self.node_setup.start.call_count == 4)
+ booting = self.daemon.booting.get(self.TIMEOUT)
self.stop_proxy(self.daemon)
- self.assertEqual(3, self.node_setup.start.call_count)
+ # Tally how many booting nodes were requested of each size.
sizecounts = {a[0].id: 0 for a in avail_sizes}
for b in booting.itervalues():
sizecounts[b.cloud_size.get().id] += 1
logging.info(sizecounts)
+ self.assertEqual(3, sizecounts[small.id])
+ self.assertEqual(1, sizecounts[big.id])
+
+ def test_node_max_nodes_two_sizes(self):
+ # Same four-item wishlist as test_node_create_two_sizes, but with
+ # max_nodes=3 only three setups should start: two small and one big.
+ small = testutil.MockSize(1)
+ big = testutil.MockSize(2)
+ avail_sizes = [(testutil.MockSize(1), {"cores":1}),
+ (testutil.MockSize(2), {"cores":2})]
+ self.make_daemon(want_sizes=[small, small, small, big],
+ avail_sizes=avail_sizes, max_nodes=3)
+
+ # the daemon runs in another thread, so we need to wait and see
+ # if it does all the work we're expecting it to do before stopping it.
+ self.busywait(lambda: self.node_setup.start.call_count == 3)
+ booting = self.daemon.booting.get(self.TIMEOUT)
+ self.stop_proxy(self.daemon)
+ # Tally how many booting nodes were requested of each size.
+ sizecounts = {a[0].id: 0 for a in avail_sizes}
+ for b in booting.itervalues():
+ sizecounts[b.cloud_size.get().id] += 1
self.assertEqual(2, sizecounts[small.id])
self.assertEqual(1, sizecounts[big.id])
self.assertEqual(1, self.node_setup.start.call_count)
self.assertEqual(1, self.node_shutdown.start.call_count)
+ # booting a new big node
sizecounts = {a[0].id: 0 for a in avail_sizes}
for b in booting.itervalues():
sizecounts[b.cloud_size.get().id] += 1
self.assertEqual(0, sizecounts[small.id])
self.assertEqual(1, sizecounts[big.id])
+ # shutting down a small node
sizecounts = {a[0].id: 0 for a in avail_sizes}
for b in shutdowns.itervalues():
sizecounts[b.cloud_node.get().size.id] += 1
self.assertEqual(1, sizecounts[small.id])
self.assertEqual(0, sizecounts[big.id])
+
+ def test_node_max_price(self):
+ # With a total price cap of 4 (small costs 1, big costs 2), only a
+ # subset of the four-item wishlist can boot.
+ small = testutil.MockSize(1)
+ big = testutil.MockSize(2)
+ avail_sizes = [(testutil.MockSize(1), {"cores":1, "price":1}),
+ (testutil.MockSize(2), {"cores":2, "price":2})]
+ self.make_daemon(want_sizes=[small, small, small, big],
+ avail_sizes=avail_sizes,
+ max_nodes=4,
+ max_total_price=4)
+ # the daemon runs in another thread, so we need to wait and see
+ # if it does all the work we're expecting it to do before stopping it.
+ self.busywait(lambda: self.node_setup.start.call_count == 3)
+ booting = self.daemon.booting.get()
+ self.stop_proxy(self.daemon)
+
+ # Tally how many booting nodes were requested of each size.
+ sizecounts = {a[0].id: 0 for a in avail_sizes}
+ for b in booting.itervalues():
+ sizecounts[b.cloud_size.get().id] += 1
+ logging.info(sizecounts)
+
+ # Booting 3 small nodes and not booting a big node would also partially
+ # satisfy the wishlist and come in under the price cap, however the way
+ # the update_server_wishlist() currently works effectively results in a
+ # round-robin creation of one node of each size in the wishlist, so
+ # test for that.
+ self.assertEqual(2, sizecounts[small.id])
+ self.assertEqual(1, sizecounts[big.id])
+
+ @mock.patch("arvnodeman.daemon.NodeManagerDaemonActor._resume_node")
+ def test_resume_drained_nodes(self, resume_node):
+ # A paired node whose Arvados record reports slurm_state "down"
+ # should be handed to _resume_node by the daemon.
+ cloud_node = testutil.cloud_node_mock(1)
+ arv_node = testutil.arvados_node_mock(1, info={"ec2_instance_id": "1", "slurm_state": "down"})
+ self.make_daemon([cloud_node], [arv_node])
+ # _resume_node should be called with the tracked cloud node record.
+ resume_node.assert_called_with(self.daemon.cloud_nodes.get(self.TIMEOUT).nodes.values()[0])
+ self.stop_proxy(self.daemon)
+
+ @mock.patch("arvnodeman.daemon.NodeManagerDaemonActor._resume_node")
+ def test_no_resume_shutdown_nodes(self, resume_node):
+ # A "down" node that already has a shutdown actor registered must
+ # NOT be resumed when its Arvados record arrives.
+ cloud_node = testutil.cloud_node_mock(1)
+ arv_node = testutil.arvados_node_mock(1, info={"ec2_instance_id": "1", "slurm_state": "down"})
+
+ self.make_daemon([cloud_node], [])
+
+ # Register a pending shutdown for the node before it pairs.
+ self.node_shutdown = mock.MagicMock(name='shutdown_mock')
+ self.daemon.shutdowns.get(self.TIMEOUT)[cloud_node.id] = self.node_shutdown
+
+ self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
+ self.stop_proxy(self.daemon)
+ resume_node.assert_not_called()