from arvnodeman.computenode.dispatch import ComputeNodeMonitorActor
from . import testutil
from . import test_status
+from . import pykka_timeout
import logging
class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
unittest.TestCase):
+ def assertwait(self, f, timeout=pykka_timeout*2):
+ deadline = time.time() + timeout
+ while True:
+ try:
+ return f()
+ except AssertionError:
+ if time.time() > deadline:
+ raise
+ pass
+ time.sleep(.1)
+ self.daemon.ping().get(self.TIMEOUT)
+
    def busywait(self, f):
+        """Poll predicate f up to 200 times (~20s), pinging the daemon
+        actor between attempts; fail the test if f never returns truthy.
+
+        Unlike assertwait, f is a plain predicate rather than a callable
+        that raises AssertionError.
+        """
-        n = 0
-        while not f() and n < 200:
+        for n in xrange(200):
+            ok = f()
+            if ok:
+                return
             time.sleep(.1)
             self.daemon.ping().get(self.TIMEOUT)
-            n += 1
-        self.assertTrue(f())
+        self.assertTrue(ok) # always falsy, but not necessarily False
def mock_node_start(self, **kwargs):
# Make sure that every time the daemon starts a setup actor,
size = testutil.MockSize(1)
self.make_daemon(want_sizes=[size])
self.busywait(lambda: self.node_setup.start.called)
+ self.assertIn('node_quota', status.tracker._latest)
    def check_monitors_arvados_nodes(self, *arv_nodes):
+        # Retry until exactly the given Arvados node records (in any
+        # order) are the ones being monitored.
-        self.busywait(lambda: len(arv_nodes) == len(self.monitored_arvados_nodes()))
-        self.assertItemsEqual(arv_nodes, self.monitored_arvados_nodes())
+        self.assertwait(lambda: self.assertItemsEqual(arv_nodes, self.monitored_arvados_nodes()))
def test_node_pairing(self):
cloud_node = testutil.cloud_node_mock(1)
want_sizes=[testutil.MockSize(1)])
self.busywait(lambda: not self.node_setup.start.called)
+    def test_select_stale_node_records_with_slot_numbers_first(self):
+        """
+        Stale node records with slot_number assigned can exist when
+        clean_arvados_node() isn't executed after a node shutdown, for
+        various reasons.
+        NodeManagerDaemonActor should use these stale node records first, so
+        that they don't accumulate unused, reducing the slots available.
+        """
+        size = testutil.MockSize(1)
+        a_long_time_ago = '1970-01-01T01:02:03.04050607Z'
+        arvados_nodes = []
+        for n in range(9):
+            # Add several stale node records without slot_number assigned
+            arvados_nodes.append(
+                testutil.arvados_node_mock(
+                    n+1,
+                    slot_number=None,
+                    modified_at=a_long_time_ago))
+        # Add one record with a slot_number assigned, it should be the
+        # first one selected
+        arv_node = testutil.arvados_node_mock(
+            123,
+            modified_at=a_long_time_ago)
+        arvados_nodes.append(arv_node)
+        cloud_node = testutil.cloud_node_mock(125, size=size)
+        self.make_daemon(cloud_nodes=[cloud_node],
+                         arvados_nodes=arvados_nodes)
+        arvados_nodes_tracker = self.daemon.arvados_nodes.get()
+        # Here, find_stale_node() should return the node record with
+        # the slot_number assigned.
+        self.assertEqual(arv_node,
+                         arvados_nodes_tracker.find_stale_node(3601))
+
def test_dont_count_missing_as_busy(self):
size = testutil.MockSize(1)
self.make_daemon(cloud_nodes=[testutil.cloud_node_mock(1, size=size),
arvados_nodes=[testutil.arvados_node_mock(1),
testutil.arvados_node_mock(2, last_ping_at='1970-01-01T01:02:03.04050607Z')],
want_sizes=[size])
- self.busywait(lambda: 2 == self.paired_monitor_count())
+ self.assertwait(lambda: self.assertEqual(2, self.paired_monitor_count()))
for mon_ref in self.monitor_list():
self.daemon.node_can_shutdown(mon_ref.proxy()).get(self.TIMEOUT)
self.assertEqual(1, self.node_shutdown.start.call_count)
arvados_nodes=[testutil.arvados_node_mock(1),
testutil.arvados_node_mock(2, last_ping_at='1970-01-01T01:02:03.04050607Z')],
want_sizes=[size])
- self.busywait(lambda: 2 == self.paired_monitor_count())
+ self.assertwait(lambda: self.assertEqual(2, self.paired_monitor_count()))
get_cloud_node = mock.MagicMock(name="get_cloud_node")
get_cloud_node.get.return_value = cloud_nodes[1]
mock_node_monitor = mock.MagicMock()
self.daemon.cloud_nodes.get()[cloud_nodes[1].id].shutdown_actor = mock_shutdown.proxy()
- self.busywait(lambda: 2 == self.alive_monitor_count())
+ self.assertwait(lambda: self.assertEqual(2, self.alive_monitor_count()))
for mon_ref in self.monitor_list():
self.daemon.node_can_shutdown(mon_ref.proxy()).get(self.TIMEOUT)
self.busywait(lambda: 1 == self.node_shutdown.start.call_count)
arv_node = testutil.arvados_node_mock(2, job_uuid=True)
self.make_daemon([testutil.cloud_node_mock(2, size=size)], [arv_node],
[size], avail_sizes=[(size, {"cores":1})])
- self.busywait(lambda: 1 == self.paired_monitor_count())
- self.busywait(lambda: self.node_setup.start.called)
+ self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
+ self.assertwait(lambda: self.assertEqual(1, self.node_setup.start.called))
def test_boot_new_node_below_min_nodes(self):
min_size = testutil.MockSize(1)
arv_node = testutil.arvados_node_mock(1)
size = testutil.MockSize(1)
self.make_daemon(cloud_nodes=[cloud_node], arvados_nodes=[arv_node], want_sizes=[size])
- self.busywait(lambda: 1 == self.paired_monitor_count())
+ self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
monitor = self.monitor_list()[0].proxy()
self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
cloud_node = testutil.cloud_node_mock(1)
arv_node = testutil.arvados_node_mock(1)
self.make_daemon(cloud_nodes=[cloud_node], arvados_nodes=[arv_node], min_nodes=1)
- self.busywait(lambda: 1 == self.paired_monitor_count())
+ self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
monitor = self.monitor_list()[0].proxy()
self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
arv_nodes = [testutil.arvados_node_mock(3, job_uuid=True),
testutil.arvados_node_mock(4, job_uuid=None)]
self.make_daemon(cloud_nodes, arv_nodes, [size])
- self.busywait(lambda: 2 == self.paired_monitor_count())
+ self.assertwait(lambda: self.assertEqual(2, self.paired_monitor_count()))
for mon_ref in self.monitor_list():
monitor = mon_ref.proxy()
if monitor.cloud_node.get(self.TIMEOUT) is cloud_nodes[-1]:
self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.last_shutdown.success.get.return_value = False
self.daemon.node_finished_shutdown(self.last_shutdown).get(self.TIMEOUT)
- self.busywait(lambda: 1 == self.paired_monitor_count())
+ self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.last_shutdown.success.get.return_value = True
self.last_shutdown.stop.side_effect = lambda: monitor.stop()
self.daemon.node_finished_shutdown(self.last_shutdown).get(self.TIMEOUT)
- self.busywait(lambda: 0 == self.paired_monitor_count())
+ self.assertwait(lambda: self.assertEqual(0, self.paired_monitor_count()))
def test_nodes_shutting_down_replaced_below_max_nodes(self):
size = testutil.MockSize(6)
monitor = self.monitor_list()[0].proxy()
self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.assertTrue(self.node_shutdown.start.called)
+ getmock = mock.MagicMock()
+ getmock.get.return_value = False
+ self.last_shutdown.cancel_shutdown.return_value = getmock
self.daemon.update_server_wishlist(
[testutil.MockSize(6)]).get(self.TIMEOUT)
self.busywait(lambda: self.node_setup.start.called)
+    def test_nodes_shutting_down_cancelled(self):
+        # A shutdown already in progress should be cancelled when the
+        # server wishlist asks for a node of that same size again.
+        size = testutil.MockSize(6)
+        cloud_node = testutil.cloud_node_mock(6, size=size)
+        self.make_daemon([cloud_node], [testutil.arvados_node_mock(6, crunch_worker_state='down')],
+                         avail_sizes=[(size, {"cores":1})])
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.assertTrue(self.node_shutdown.start.called)
+        self.daemon.update_server_wishlist(
+            [testutil.MockSize(6)]).get(self.TIMEOUT)
+        self.busywait(lambda: self.last_shutdown.cancel_shutdown.called)
+
def test_nodes_shutting_down_not_replaced_at_max_nodes(self):
cloud_node = testutil.cloud_node_mock(7)
self.make_daemon([cloud_node], [testutil.arvados_node_mock(7)],
max_nodes=1)
- self.busywait(lambda: 1 == self.paired_monitor_count())
+ self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
monitor = self.monitor_list()[0].proxy()
self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.assertTrue(self.node_shutdown.start.called)
arv_nodes = [testutil.arvados_node_mock(n, size=size) for n in [8, 9]]
self.make_daemon(cloud_nodes, arv_nodes, [size],
avail_sizes=[(size, {"cores":1})])
- self.busywait(lambda: 2 == self.paired_monitor_count())
+ self.assertwait(lambda: self.assertEqual(2, self.paired_monitor_count()))
for mon_ref in self.monitor_list():
self.daemon.node_can_shutdown(mon_ref.proxy()).get(self.TIMEOUT)
self.assertEqual(1, self.node_shutdown.start.call_count)
self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
self.busywait(lambda: 1 == self.last_shutdown.stop.call_count)
+    def test_idle_node_disappearing_clears_status_idle_time_counter(self):
+        # When an idle node vanishes from the cloud node list, its entry
+        # in status.tracker._idle_nodes must be removed too, so the idle
+        # tracking dict doesn't accumulate entries for dead nodes.
+        size = testutil.MockSize(1)
+        status.tracker._idle_nodes = {}
+        cloud_nodes = [testutil.cloud_node_mock(1, size=size)]
+        arv_nodes = [testutil.arvados_node_mock(1, job_uuid=None)]
+        self.make_daemon(cloud_nodes, arv_nodes, [size])
+        self.assertwait(lambda: self.assertEqual(1, self.paired_monitor_count()))
+        for mon_ref in self.monitor_list():
+            monitor = mon_ref.proxy()
+            if monitor.cloud_node.get(self.TIMEOUT) is cloud_nodes[-1]:
+                break
+        else:
+            self.fail("monitor for idle node not found")
+        self.assertEqual(1, status.tracker.get('nodes_idle'))
+        hostname = monitor.arvados_node.get()['hostname']
+        self.assertIn(hostname, status.tracker._idle_nodes)
+        # Simulate the node disappearing from the cloud node list
+        self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
+        self.busywait(lambda: 0 == self.alive_monitor_count())
+        self.assertNotIn(hostname, status.tracker._idle_nodes)
+
def test_shutdown_actor_cleanup_copes_with_dead_actors(self):
self.make_daemon(cloud_nodes=[testutil.cloud_node_mock()])
self.assertEqual(1, self.alive_monitor_count())
big = testutil.MockSize(2)
avail_sizes = [(testutil.MockSize(1), {"cores":1}),
(testutil.MockSize(2), {"cores":2})]
- self.make_daemon(want_sizes=[small, small, small, big],
+ self.make_daemon(want_sizes=[small, small, big, small],
avail_sizes=avail_sizes, max_nodes=3)
# the daemon runs in another thread, so we need to wait and see
self.assertEqual(2, sizecounts[small.id])
self.assertEqual(1, sizecounts[big.id])
+    def test_wishlist_ordering(self):
+        # Check that big nodes aren't prioritized; since #12199 containers are
+        # scheduled on specific node sizes.
+        small = testutil.MockSize(1)
+        big = testutil.MockSize(2)
+        avail_sizes = [(testutil.MockSize(1), {"cores":1}),
+                       (testutil.MockSize(2), {"cores":2})]
+        # With max_nodes=3, only the first three wishlist entries (all
+        # small) should be booted; the big node at the end misses out.
+        self.make_daemon(want_sizes=[small, small, small, big],
+                         avail_sizes=avail_sizes, max_nodes=3)
+
+        # the daemon runs in another thread, so we need to wait and see
+        # if it does all the work we're expecting it to do before stopping it.
+        self.busywait(lambda: self.node_setup.start.call_count == 3)
+        booting = self.daemon.booting.get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        # Tally how many setup actors were started per node size.
+        sizecounts = {a[0].id: 0 for a in avail_sizes}
+        for b in booting.itervalues():
+            sizecounts[b.cloud_size.get().id] += 1
+        self.assertEqual(3, sizecounts[small.id])
+        self.assertEqual(0, sizecounts[big.id])
+
def test_wishlist_reconfigure(self):
small = testutil.MockSize(1)
big = testutil.MockSize(2)
testutil.arvados_node_mock(3)],
want_sizes=[small, small, big],
avail_sizes=avail_sizes)
- self.busywait(lambda: 3 == self.paired_monitor_count())
+ self.assertwait(lambda: self.assertEqual(3, self.paired_monitor_count()))
self.daemon.update_server_wishlist([small, big, big]).get(self.TIMEOUT)
self.assertEqual(0, self.node_shutdown.start.call_count)