X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/10402f74ccdae453ae353c389a02c0415adcf1e7..f9e94997cb5c2166d8b71874f263544cfc2fe5ba:/services/nodemanager/arvnodeman/jobqueue.py

diff --git a/services/nodemanager/arvnodeman/jobqueue.py b/services/nodemanager/arvnodeman/jobqueue.py
index 4d2d3e0c0a..e91764474f 100644
--- a/services/nodemanager/arvnodeman/jobqueue.py
+++ b/services/nodemanager/arvnodeman/jobqueue.py
@@ -6,6 +6,7 @@ from __future__ import absolute_import, print_function
 
 import logging
+import re
 import subprocess
 
 import arvados.util
 
@@ -23,6 +24,26 @@ class ServerCalculator(object):
     that would best satisfy the jobs, choosing the cheapest size that
     satisfies each job, and ignoring jobs that can't be satisfied.
     """
+    class InvalidCloudSize(object):
+        """
+        Dummy CloudSizeWrapper-like class, to be used when a cloud node doesn't
+        have a recognizable arvados_node_size tag.
+        """
+        def __init__(self):
+            self.id = 'invalid'
+            self.name = 'invalid'
+            self.ram = 0
+            self.disk = 0
+            self.scratch = 0
+            self.cores = 0
+            self.bandwidth = 0
+            self.price = 9999999
+            self.preemptible = False
+            self.extra = {}
+
+        def meets_constraints(self, **kwargs):
+            return False
+
 
     class CloudSizeWrapper(object):
         def __init__(self, real_size, node_mem_scaling, **kwargs):
@@ -37,7 +58,9 @@ class ServerCalculator(object):
                 self.disk = 0
             self.scratch = self.disk * 1000
             self.ram = int(self.ram * node_mem_scaling)
+            self.preemptible = False
             for name, override in kwargs.iteritems():
+                if name == 'instance_type': continue
                 if not hasattr(self, name):
                     raise ValueError("unrecognized size field '%s'" % (name,))
                 setattr(self, name, override)
@@ -74,13 +97,17 @@ class ServerCalculator(object):
             return fallback
 
     def cloud_size_for_constraints(self, constraints):
+        specified_size = constraints.get('instance_type')
         want_value = lambda key: self.coerce_int(constraints.get(key), 0)
         wants = {'cores': want_value('min_cores_per_node'),
                  'ram': want_value('min_ram_mb_per_node'),
                  'scratch': want_value('min_scratch_mb_per_node')}
+        # EC2 node sizes are identified by id. GCE sizes are identified by name.
         for size in self.cloud_sizes:
-            if size.meets_constraints(**wants):
-                return size
+            if (size.meets_constraints(**wants) and
+                (specified_size is None or
+                 size.id == specified_size or size.name == specified_size)):
+                return size
         return None
 
     def servers_for_queue(self, queue):
@@ -92,14 +119,13 @@
             cloud_size = self.cloud_size_for_constraints(constraints)
             if cloud_size is None:
                 unsatisfiable_jobs[job['uuid']] = (
-                    'Requirements for a single node exceed the available '
-                    'cloud node size')
+                    "Constraints cannot be satisfied by any node type")
             elif (want_count > self.max_nodes):
                 unsatisfiable_jobs[job['uuid']] = (
                     "Job's min_nodes constraint is greater than the configured "
                     "max_nodes (%d)" % self.max_nodes)
             elif (want_count*cloud_size.price <= self.max_price):
-                servers.extend([cloud_size.real] * want_count)
+                servers.extend([cloud_size] * want_count)
             else:
                 unsatisfiable_jobs[job['uuid']] = (
                     "Job's price (%d) is above system's max_price "
@@ -113,7 +139,7 @@
         for s in self.cloud_sizes:
             if s.id == sizeid:
                 return s
-        return None
+        return self.InvalidCloudSize()
 
 
 class JobQueueMonitorActor(clientactor.RemotePollLoopActor):
@@ -151,22 +177,47 @@ class JobQueueMonitorActor(clientactor.RemotePollLoopActor):
     def _send_request(self):
         queuelist = []
         if self.slurm_queue:
-            # cpus, memory, tempory disk space, reason, job name
-            squeue_out = subprocess.check_output(["squeue", "--state=PENDING", "--noheader", "--format=%c|%m|%d|%r|%j"])
+            # cpus, memory, temporary disk space, reason, job name, feature constraints, priority
+            squeue_out = subprocess.check_output(["squeue", "--state=PENDING", "--noheader", "--format=%c|%m|%d|%r|%j|%f|%Q"])
             for out in squeue_out.splitlines():
                 try:
-                    cpu, ram, disk, reason, jobname = out.split("|", 4)
-                    if ("ReqNodeNotAvail" in reason) or ("Resources" in reason) or ("Priority" in reason):
-                        queuelist.append({
-                            "uuid": jobname,
-                            "runtime_constraints": {
-                                "min_cores_per_node": cpu,
-                                "min_ram_mb_per_node": self.coerce_to_mb(ram),
-                                "min_scratch_mb_per_node": self.coerce_to_mb(disk)
-                            }
-                        })
+                    cpu, ram, disk, reason, jobname, features, priority = out.split("|", 6)
                 except ValueError:
-                    pass
+                    self._logger.warning("ignored malformed line in squeue output: %r", out)
+                    continue
+                if '-dz642-' not in jobname:
+                    continue
+                if not re.search(r'BadConstraints|ReqNodeNotAvail|Resources|Priority', reason):
+                    continue
+
+                for feature in features.split(','):
+                    m = re.match(r'instancetype=(.*)', feature)
+                    if not m:
+                        continue
+                    instance_type = m.group(1)
+                    # Ignore cpu/ram/scratch requirements, bring up
+                    # the requested node type.
+                    queuelist.append({
+                        "uuid": jobname,
+                        "runtime_constraints": {
+                            "instance_type": instance_type,
+                        },
+                        "priority": int(priority)
+                    })
+                    break
+                else:
+                    # No instance type specified. Choose a node type
+                    # to suit cpu/ram/scratch requirements.
+                    queuelist.append({
+                        "uuid": jobname,
+                        "runtime_constraints": {
+                            "min_cores_per_node": cpu,
+                            "min_ram_mb_per_node": self.coerce_to_mb(ram),
+                            "min_scratch_mb_per_node": self.coerce_to_mb(disk)
+                        },
+                        "priority": int(priority)
+                    })
+            queuelist.sort(key=lambda x: x.get('priority', 1), reverse=True)
 
         if self.jobs_queue:
             queuelist.extend(self._client.jobs().queue().execute()['items'])
@@ -197,5 +248,5 @@ class JobQueueMonitorActor(clientactor.RemotePollLoopActor):
                                          job_uuid, error)
 
         self._logger.debug("Calculated wishlist: %s",
-                           ', '.join(s.name for s in server_list) or "(empty)")
+                           ', '.join(s.id for s in server_list) or "(empty)")
         return super(JobQueueMonitorActor, self)._got_response(server_list)
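
For reference, below is a minimal standalone sketch (not part of the commit above) of how the new squeue output format "%c|%m|%d|%r|%j|%f|%Q" is turned into wishlist entries by _send_request(). The sample queue lines, container UUIDs and node type name are made up, and the coerce_to_mb() conversion and error logging are left out for brevity:

import re

# Hypothetical pending entries, one per SLURM job, in the new
# cpus|memory|tmp disk|reason|job name|features|priority format.
sample_squeue_lines = [
    "1|1024|0|Resources|zzzzz-dz642-aaaaaaaaaaaaaaa|instancetype=m4.large|1000",
    "4|8192|10000|Priority|zzzzz-dz642-bbbbbbbbbbbbbbb||900",
]

queuelist = []
for out in sample_squeue_lines:
    cpu, ram, disk, reason, jobname, features, priority = out.split("|", 6)
    if '-dz642-' not in jobname:
        continue  # job name is not an Arvados container UUID
    if not re.search(r'BadConstraints|ReqNodeNotAvail|Resources|Priority', reason):
        continue  # job is not pending for lack of a suitable node
    for feature in features.split(','):
        m = re.match(r'instancetype=(.*)', feature)
        if not m:
            continue
        # An explicit node type was requested via a SLURM feature
        # constraint; cpu/ram/scratch requirements are ignored.
        queuelist.append({
            "uuid": jobname,
            "runtime_constraints": {"instance_type": m.group(1)},
            "priority": int(priority),
        })
        break
    else:
        # No instance type given: fall back to per-resource minimums
        # (the real code converts ram/disk with coerce_to_mb()).
        queuelist.append({
            "uuid": jobname,
            "runtime_constraints": {
                "min_cores_per_node": cpu,
                "min_ram_mb_per_node": ram,
                "min_scratch_mb_per_node": disk,
            },
            "priority": int(priority),
        })

# Higher-priority jobs go to the front of the wishlist.
queuelist.sort(key=lambda x: x.get('priority', 1), reverse=True)
print(queuelist)

A job that names an instancetype feature is matched on that type alone, everything else falls back to the per-resource minimums, and the resulting list is sorted so higher-priority jobs are considered first when sizing the node wishlist.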