X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/bea445d5d02adb035a126582e5c0358ec5db5c75..120a2268606d73317ab2353d79c3046017300f81:/sdk/cwl/arvados_cwl/arvcontainer.py

diff --git a/sdk/cwl/arvados_cwl/arvcontainer.py b/sdk/cwl/arvados_cwl/arvcontainer.py
index 17fe8cb5c4..c2029b965b 100644
--- a/sdk/cwl/arvados_cwl/arvcontainer.py
+++ b/sdk/cwl/arvados_cwl/arvcontainer.py
@@ -1,19 +1,27 @@
 import logging
+import json
+import os
+
+from cwltool.errors import WorkflowException
+from cwltool.process import get_feature, UnsupportedRequirement, shortname
+from cwltool.pathmapper import adjustFiles
+from cwltool.utils import aslist
+
 import arvados.collection
-from cwltool.process import get_feature, adjustFiles
+
 from .arvdocker import arv_docker_get_image
 from . import done
-from cwltool.errors import WorkflowException
-from cwltool.process import UnsupportedRequirement
+from .runner import Runner, arvados_jobs_image
 
 logger = logging.getLogger('arvados.cwl-runner')
 
 class ArvadosContainer(object):
-    """Submit and manage a Crunch job for executing a CWL CommandLineTool."""
+    """Submit and manage a Crunch container request for executing a CWL CommandLineTool."""
 
     def __init__(self, runner):
         self.arvrunner = runner
         self.running = False
+        self.uuid = None
 
     def update_pipeline_component(self, r):
         pass
@@ -23,56 +31,55 @@ class ArvadosContainer(object):
             "command": self.command_line,
             "owner_uuid": self.arvrunner.project_uuid,
             "name": self.name,
-            "output_path": "/var/spool/cwl",
-            "cwd": "/var/spool/cwl",
+            "output_path": self.outdir,
+            "cwd": self.outdir,
             "priority": 1,
             "state": "Committed"
         }
         runtime_constraints = {}
         mounts = {
-            "/var/spool/cwl": {
+            self.outdir: {
                 "kind": "tmp"
             }
         }
 
+        dirs = set()
         for f in self.pathmapper.files():
-            _, p = self.pathmapper.mapper(f)
-            mounts[p] = {
-                "kind": "collection",
-                "portable_data_hash": p[6:]
-            }
+            _, p, tp = self.pathmapper.mapper(f)
+            if tp == "Directory" and '/' not in p[6:]:
+                mounts[p] = {
+                    "kind": "collection",
+                    "portable_data_hash": p[6:]
+                }
+                dirs.add(p[6:])
+        for f in self.pathmapper.files():
+            _, p, tp = self.pathmapper.mapper(f)
+            if p[6:].split("/")[0] not in dirs:
+                mounts[p] = {
+                    "kind": "collection",
+                    "portable_data_hash": p[6:]
+                }
 
-        if self.generatefiles:
-            raise UnsupportedRequirement("Stdin redirection currently not suppported")
+        if self.generatefiles["listing"]:
+            raise UnsupportedRequirement("Generate files not supported")
 
-            vwd = arvados.collection.Collection()
-            container_request["task.vwd"] = {}
-            for t in self.generatefiles:
-                if isinstance(self.generatefiles[t], dict):
-                    src, rest = self.arvrunner.fs_access.get_collection(self.generatefiles[t]["path"].replace("$(task.keep)/", "keep:"))
-                    vwd.copy(rest, t, source_collection=src)
-                else:
-                    with vwd.open(t, "w") as f:
-                        f.write(self.generatefiles[t])
-            vwd.save_new()
-            # TODO
-            # for t in self.generatefiles:
-            #     container_request["task.vwd"][t] = "$(task.keep)/%s/%s" % (vwd.portable_data_hash(), t)
-
-        container_request["environment"] = {"TMPDIR": "/tmp"}
+        container_request["environment"] = {"TMPDIR": self.tmpdir, "HOME": self.outdir}
         if self.environment:
             container_request["environment"].update(self.environment)
 
         if self.stdin:
             raise UnsupportedRequirement("Stdin redirection currently not suppported")
 
+        if self.stderr:
+            raise UnsupportedRequirement("Stderr redirection currently not suppported")
+
         if self.stdout:
             mounts["stdout"] = {"kind": "file",
-                                "path": "/var/spool/cwl/%s" % (self.stdout)}
+                                "path": "%s/%s" % (self.outdir, self.stdout)}
 
         (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
         if not docker_req:
-            docker_req = {"dockerImageId": "arvados/jobs"}
+            docker_req = {"dockerImageId": arvados_jobs_image(self.arvrunner)}
 
         container_request["container_image"] = arv_docker_get_image(self.arvrunner.api,
                                                                      docker_req,
@@ -83,7 +90,18 @@ class ArvadosContainer(object):
         if resources is not None:
             runtime_constraints["vcpus"] = resources.get("cores", 1)
             runtime_constraints["ram"] = resources.get("ram") * 2**20
-        #runtime_constraints["min_scratch_mb_per_node"] = resources.get("tmpdirSize", 0) + resources.get("outdirSize", 0)
+
+        api_req, _ = get_feature(self, "http://arvados.org/cwl#APIRequirement")
+        if api_req:
+            runtime_constraints["API"] = True
+
+        runtime_req, _ = get_feature(self, "http://arvados.org/cwl#RuntimeConstraints")
+        if runtime_req:
+            runtime_constraints["keep_cache_ram"] = runtime_req["keep_cache"]
+
+        partition_req, _ = get_feature(self, "http://arvados.org/cwl#PartitionRequirement")
+        if partition_req:
+            runtime_constraints["partition"] = aslist(partition_req["partition"])
 
         container_request["mounts"] = mounts
         container_request["runtime_constraints"] = runtime_constraints
@@ -93,12 +111,16 @@ class ArvadosContainer(object):
                 body=container_request
             ).execute(num_retries=self.arvrunner.num_retries)
 
-            self.arvrunner.jobs[response["container_uuid"]] = self
+            self.arvrunner.processes[response["container_uuid"]] = self
 
-            logger.info("Container %s (%s) request state is %s", self.name, response["container_uuid"], response["state"])
+            container = self.arvrunner.api.containers().get(
+                uuid=response["container_uuid"]
+            ).execute(num_retries=self.arvrunner.num_retries)
+
+            logger.info("Container request %s (%s) state is %s with container %s %s", self.name, response["uuid"], response["state"], container["uuid"], container["state"])
 
-            if response["state"] == "Final":
-                self.done(response)
+            if container["state"] in ("Complete", "Cancelled"):
+                self.done(container)
         except Exception as e:
             logger.error("Got error %s" % str(e))
             self.output_callback({}, "permanentFail")
@@ -106,14 +128,24 @@ class ArvadosContainer(object):
     def done(self, record):
         try:
             if record["state"] == "Complete":
-                processStatus = "success"
+                rcode = record["exit_code"]
+                if self.successCodes and rcode in self.successCodes:
+                    processStatus = "success"
+                elif self.temporaryFailCodes and rcode in self.temporaryFailCodes:
+                    processStatus = "temporaryFail"
+                elif self.permanentFailCodes and rcode in self.permanentFailCodes:
+                    processStatus = "permanentFail"
+                elif rcode == 0:
+                    processStatus = "success"
+                else:
+                    processStatus = "permanentFail"
             else:
                 processStatus = "permanentFail"
 
             try:
                 outputs = {}
                 if record["output"]:
-                    outputs = done.done(self, record, "/tmp", "/var/spool/cwl", "/keep")
+                    outputs = done.done(self, record, "/tmp", self.outdir, "/keep")
             except WorkflowException as e:
                 logger.error("Error while collecting container outputs:\n%s", e, exc_info=(e if self.arvrunner.debug else False))
                 processStatus = "permanentFail"
@@ -123,4 +155,86 @@ class ArvadosContainer(object):
 
             self.output_callback(outputs, processStatus)
         finally:
-            del self.arvrunner.jobs[record["uuid"]]
+            del self.arvrunner.processes[record["uuid"]]
+
+
+class RunnerContainer(Runner):
+    """Submit and manage a container that runs arvados-cwl-runner."""
+
+    def arvados_job_spec(self, dry_run=False, pull_image=True, **kwargs):
+        """Create an Arvados container request for this workflow.
+
+        The returned dict can be used to create a container passed as
+        the +body+ argument to container_requests().create().
+        """
+
+        workflowmapper = super(RunnerContainer, self).arvados_job_spec(dry_run=dry_run, pull_image=pull_image, **kwargs)
+
+        with arvados.collection.Collection(api_client=self.arvrunner.api,
+                                           keep_client=self.arvrunner.keep_client,
+                                           num_retries=self.arvrunner.num_retries) as jobobj:
+            with jobobj.open("cwl.input.json", "w") as f:
+                json.dump(self.job_order, f, sort_keys=True, indent=4)
+            jobobj.save_new(owner_uuid=self.arvrunner.project_uuid)
+
+        workflowname = os.path.basename(self.tool.tool["id"])
+        workflowpath = "/var/lib/cwl/workflow/%s" % workflowname
+        workflowcollection = workflowmapper.mapper(self.tool.tool["id"])[1]
+        workflowcollection = workflowcollection[5:workflowcollection.index('/')]
+        jobpath = "/var/lib/cwl/job/cwl.input.json"
+
+        command = ["arvados-cwl-runner", "--local", "--api=containers"]
+        if self.output_name:
+            command.append("--output-name=" + self.output_name)
+        command.extend([workflowpath, jobpath])
+
+        return {
+            "command": command,
+            "owner_uuid": self.arvrunner.project_uuid,
+            "name": self.name,
+            "output_path": "/var/spool/cwl",
+            "cwd": "/var/spool/cwl",
+            "priority": 1,
+            "state": "Committed",
+            "container_image": arvados_jobs_image(self.arvrunner),
+            "mounts": {
+                "/var/lib/cwl/workflow": {
+                    "kind": "collection",
+                    "portable_data_hash": "%s" % workflowcollection
+                },
+                jobpath: {
+                    "kind": "collection",
+                    "portable_data_hash": "%s/cwl.input.json" % jobobj.portable_data_hash()
+                },
+                "stdout": {
+                    "kind": "file",
+                    "path": "/var/spool/cwl/cwl.output.json"
+                },
+                "/var/spool/cwl": {
+                    "kind": "collection",
+                    "writable": True
+                }
+            },
+            "runtime_constraints": {
+                "vcpus": 1,
+                "ram": 1024*1024*256,
+                "API": True
+            }
+        }
+
+    def run(self, *args, **kwargs):
+        kwargs["keepprefix"] = "keep:"
+        job_spec = self.arvados_job_spec(*args, **kwargs)
+        job_spec.setdefault("owner_uuid", self.arvrunner.project_uuid)
+
+        response = self.arvrunner.api.container_requests().create(
+            body=job_spec
+        ).execute(num_retries=self.arvrunner.num_retries)
+
+        self.uuid = response["uuid"]
+        self.arvrunner.processes[response["container_uuid"]] = self
+
+        logger.info("Submitted container %s", response["uuid"])
+
+        if response["state"] in ("Complete", "Failed", "Cancelled"):
+            self.done(response)
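
For context, the request bodies built above are plain dicts posted to the Arvados container_requests API. The standalone sketch below (illustrative only, not part of the commit) shows the same create/execute call pattern used by ArvadosContainer.run() and RunnerContainer.run() with the Arvados Python SDK. The project UUID, image, command, and retry count are placeholders, and arvados.api() is assumed to read ARVADOS_API_HOST and ARVADOS_API_TOKEN from the environment.

import arvados

# Assumes ARVADOS_API_HOST / ARVADOS_API_TOKEN are configured in the environment.
api = arvados.api("v1")

# Minimal request body mirroring the shape assembled in this file;
# every value here is a placeholder for illustration.
container_request = {
    "name": "example-cwl-step",
    "owner_uuid": "zzzzz-j7d0g-0000000000000000",   # placeholder project UUID
    "container_image": "arvados/jobs",              # placeholder; the commit resolves this via arvados_jobs_image()
    "command": ["echo", "hello"],                   # placeholder command line
    "output_path": "/var/spool/cwl",
    "cwd": "/var/spool/cwl",
    "priority": 1,
    "state": "Committed",
    "mounts": {"/var/spool/cwl": {"kind": "tmp"}},
    "runtime_constraints": {"vcpus": 1, "ram": 256 * 1024 * 1024},
}

response = api.container_requests().create(body=container_request).execute(num_retries=3)
print("Container request %s is %s" % (response["uuid"], response["state"]))

Submitting with "state": "Committed" asks the API server to queue the request for execution right away, which is why both run() methods in this file set it; an "Uncommitted" request would be saved as a draft instead.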