def _send_request(self):
queuelist = []
if self.slurm_queue:
- # cpus, memory, tempory disk space, reason, job name, feature constraints
+ # cpus, memory, temporary disk space, reason, job name, feature constraints, priority
squeue_out = subprocess.check_output(["squeue", "--state=PENDING", "--noheader", "--format=%c|%m|%d|%r|%j|%f|%Q"])
for out in squeue_out.splitlines():
try:
},
"priority": int(priority)
})
- queuelist = sorted(queuelist, key=lambda x: x.get('priority', 1), reverse=True)
+ queuelist.sort(key=lambda x: x.get('priority', 1), reverse=True)
if self.jobs_queue:
queuelist.extend(self._client.jobs().queue().execute()['items'])
self.assertEqual(2, sizecounts[small.id])
self.assertEqual(1, sizecounts[big.id])
+ def test_wishlist_ordering(self):
+ # Check that big nodes aren't prioritized; since #12199 containers are
+ # scheduled on specific node sizes.
+ small = testutil.MockSize(1)
+ big = testutil.MockSize(2)
+ avail_sizes = [(testutil.MockSize(1), {"cores":1}),
+ (testutil.MockSize(2), {"cores":2})]
+ self.make_daemon(want_sizes=[small, small, small, big],
+ avail_sizes=avail_sizes, max_nodes=3)
+
+ # the daemon runs in another thread, so we need to wait and see
+ # if it does all the work we're expecting it to do before stopping it.
+ self.busywait(lambda: self.node_setup.start.call_count == 3)
+ booting = self.daemon.booting.get(self.TIMEOUT)
+ self.stop_proxy(self.daemon)
+ sizecounts = {a[0].id: 0 for a in avail_sizes}
+ # NOTE(review): dict.itervalues() is Python 2-only; switch to .values()
+ # if/when this test suite moves to Python 3 -- confirm target version.
+ for b in booting.itervalues():
+ sizecounts[b.cloud_size.get().id] += 1
+ self.assertEqual(3, sizecounts[small.id])
+ self.assertEqual(0, sizecounts[big.id])
+
def test_wishlist_reconfigure(self):
small = testutil.MockSize(1)
big = testutil.MockSize(2)