X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/6cd1dd7a2a55a80dd207b70fbb10a72e68bc7ea4..a9f1adf0a3e2df296ce0a8c0a1d735b7e5044baa:/sdk/cwl/arvados_cwl/arvcontainer.py

diff --git a/sdk/cwl/arvados_cwl/arvcontainer.py b/sdk/cwl/arvados_cwl/arvcontainer.py
index fe0f7cacf8..9bf93e7c56 100644
--- a/sdk/cwl/arvados_cwl/arvcontainer.py
+++ b/sdk/cwl/arvados_cwl/arvcontainer.py
@@ -1,52 +1,70 @@
+import logging
+import json
+import os
+
+from cwltool.errors import WorkflowException
+from cwltool.process import get_feature, adjustFiles, UnsupportedRequirement, shortname
+
+import arvados.collection
+
+from .arvdocker import arv_docker_get_image
+from . import done
+from .runner import Runner
+
+logger = logging.getLogger('arvados.cwl-runner')
+
 class ArvadosContainer(object):
-    """Submit and manage a Crunch job for executing a CWL CommandLineTool."""
+    """Submit and manage a Crunch container request for executing a CWL CommandLineTool."""
 
     def __init__(self, runner):
         self.arvrunner = runner
         self.running = False
+        self.uuid = None
+
+    def update_pipeline_component(self, r):
+        pass
 
     def run(self, dry_run=False, pull_image=True, **kwargs):
         container_request = {
-            "command": self.command_line
+            "command": self.command_line,
             "owner_uuid": self.arvrunner.project_uuid,
             "name": self.name,
-            "output_path", "/var/spool/cwl",
-            "cwd", "/var/spool/cwl",
-            "priority": 1
+            "output_path": self.outdir,
+            "cwd": self.outdir,
+            "priority": 1,
+            "state": "Committed"
         }
         runtime_constraints = {}
-        mounts = {}
+        mounts = {
+            self.outdir: {
+                "kind": "tmp"
+            }
+        }
+
+        for f in self.pathmapper.files():
+            _, p = self.pathmapper.mapper(f)
+            mounts[p] = {
+                "kind": "collection",
+                "portable_data_hash": p[6:]
+            }
 
         if self.generatefiles:
-            vwd = arvados.collection.Collection()
-            container_request["task.vwd"] = {}
-            for t in self.generatefiles:
-                if isinstance(self.generatefiles[t], dict):
-                    src, rest = self.arvrunner.fs_access.get_collection(self.generatefiles[t]["path"].replace("$(task.keep)/", "keep:"))
-                    vwd.copy(rest, t, source_collection=src)
-                else:
-                    with vwd.open(t, "w") as f:
-                        f.write(self.generatefiles[t])
-            vwd.save_new()
-            # TODO
-            # for t in self.generatefiles:
-            #     container_request["task.vwd"][t] = "$(task.keep)/%s/%s" % (vwd.portable_data_hash(), t)
+            raise UnsupportedRequirement("Generate files not supported")
 
         container_request["environment"] = {"TMPDIR": "/tmp"}
         if self.environment:
             container_request["environment"].update(self.environment)
 
-        # TODO, not supported
-        #if self.stdin:
-        #    container_request["task.stdin"] = self.pathmapper.mapper(self.stdin)[1]
+        if self.stdin:
+            raise UnsupportedRequirement("Stdin redirection currently not suppported")
 
         if self.stdout:
             mounts["stdout"] = {"kind": "file",
-                                "path": self.stdout}
+                                "path": "%s/%s" % (self.outdir, self.stdout)}
 
         (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
         if not docker_req:
-            docker_req = "arvados/jobs"
+            docker_req = {"dockerImageId": "arvados/jobs"}
 
         container_request["container_image"] = arv_docker_get_image(self.arvrunner.api,
                                                                     docker_req,
@@ -57,7 +75,6 @@ class ArvadosContainer(object):
         if resources is not None:
             runtime_constraints["vcpus"] = resources.get("cores", 1)
             runtime_constraints["ram"] = resources.get("ram") * 2**20
-            #runtime_constraints["min_scratch_mb_per_node"] = resources.get("tmpdirSize", 0) + resources.get("outdirSize", 0)
 
         container_request["mounts"] = mounts
         container_request["runtime_constraints"] = runtime_constraints
@@ -67,11 +84,11 @@ class ArvadosContainer(object):
                 body=container_request
             ).execute(num_retries=self.arvrunner.num_retries)
 
-            self.arvrunner.jobs[response["uuid"]] = self
+            self.arvrunner.processes[response["container_uuid"]] = self
 
-            logger.info("Container %s (%s) is %s", self.name, response["uuid"], response["state"])
+            logger.info("Container %s (%s) request state is %s", self.name, response["container_uuid"], response["state"])
 
-            if response["state"] in ("Complete", "Cancelled"):
+            if response["state"] == "Final":
                 self.done(response)
         except Exception as e:
             logger.error("Got error %s" % str(e))
@@ -80,76 +97,26 @@ class ArvadosContainer(object):
     def done(self, record):
         try:
             if record["state"] == "Complete":
-                processStatus = "success"
+                rcode = record["exit_code"]
+                if self.successCodes and rcode in self.successCodes:
+                    processStatus = "success"
+                elif self.temporaryFailCodes and rcode in self.temporaryFailCodes:
+                    processStatus = "temporaryFail"
+                elif self.permanentFailCodes and rcode in self.permanentFailCodes:
+                    processStatus = "permanentFail"
+                elif rcode == 0:
+                    processStatus = "success"
+                else:
+                    processStatus = "permanentFail"
             else:
                 processStatus = "permanentFail"
 
             try:
                 outputs = {}
                 if record["output"]:
-                    logc = arvados.collection.Collection(record["log"])
-                    log = logc.open(logc.keys()[0])
-                    tmpdir = None
-                    outdir = None
-                    keepdir = None
-                    for l in log:
-                        # Determine the tmpdir, outdir and keepdir paths from
-                        # the job run.  Unfortunately, we can't take the first
-                        # values we find (which are expected to be near the
-                        # top) and stop scanning because if the node fails and
-                        # the job restarts on a different node these values
-                        # will different runs, and we need to know about the
-                        # final run that actually produced output.
-
-                        g = tmpdirre.match(l)
-                        if g:
-                            tmpdir = g.group(1)
-                        g = outdirre.match(l)
-                        if g:
-                            outdir = g.group(1)
-                        g = keepre.match(l)
-                        if g:
-                            keepdir = g.group(1)
-
-                    colname = "Output %s of %s" % (record["output"][0:7], self.name)
-
-                    # check if collection already exists with same owner, name and content
-                    collection_exists = self.arvrunner.api.collections().list(
-                        filters=[["owner_uuid", "=", self.arvrunner.project_uuid],
-                                 ['portable_data_hash', '=', record["output"]],
-                                 ["name", "=", colname]]
-                    ).execute(num_retries=self.arvrunner.num_retries)
-
-                    if not collection_exists["items"]:
-                        # Create a collection located in the same project as the
-                        # pipeline with the contents of the output.
-                        # First, get output record.
-                        collections = self.arvrunner.api.collections().list(
-                            limit=1,
-                            filters=[['portable_data_hash', '=', record["output"]]],
-                            select=["manifest_text"]
-                        ).execute(num_retries=self.arvrunner.num_retries)
-
-                        if not collections["items"]:
-                            raise WorkflowException(
-                                "Job output '%s' cannot be found on API server" % (
-                                    record["output"]))
-
-                        # Create new collection in the parent project
-                        # with the output contents.
-                        self.arvrunner.api.collections().create(body={
-                            "owner_uuid": self.arvrunner.project_uuid,
-                            "name": colname,
-                            "portable_data_hash": record["output"],
-                            "manifest_text": collections["items"][0]["manifest_text"]
-                        }, ensure_unique_name=True).execute(
-                            num_retries=self.arvrunner.num_retries)
-
-                    self.builder.outdir = outdir
-                    self.builder.pathmapper.keepdir = keepdir
-                    outputs = self.collect_outputs("keep:" + record["output"])
+                    outputs = done.done(self, record, "/tmp", self.outdir, "/keep")
             except WorkflowException as e:
-                logger.error("Error while collecting job outputs:\n%s", e, exc_info=(e if self.arvrunner.debug else False))
+                logger.error("Error while collecting container outputs:\n%s", e, exc_info=(e if self.arvrunner.debug else False))
                 processStatus = "permanentFail"
             except Exception as e:
                 logger.exception("Got unknown exception while collecting job outputs:")
@@ -157,4 +124,84 @@ class ArvadosContainer(object):
             self.output_callback(outputs, processStatus)
         finally:
-            del self.arvrunner.jobs[record["uuid"]]
+            del self.arvrunner.processes[record["uuid"]]
+
+
+class RunnerContainer(Runner):
+    """Submit and manage a container that runs arvados-cwl-runner."""
+
+    def arvados_job_spec(self, dry_run=False, pull_image=True, **kwargs):
+        """Create an Arvados container request for this workflow.
+
+        The returned dict can be used to create a container passed as
+        the +body+ argument to container_requests().create().
+        """
+
+        workflowmapper = super(RunnerContainer, self).arvados_job_spec(dry_run=dry_run, pull_image=pull_image, **kwargs)
+
+        with arvados.collection.Collection(api_client=self.arvrunner.api) as jobobj:
+            with jobobj.open("cwl.input.json", "w") as f:
+                json.dump(self.job_order, f, sort_keys=True, indent=4)
+            jobobj.save_new(owner_uuid=self.arvrunner.project_uuid)
+
+        workflowname = os.path.basename(self.tool.tool["id"])
+        workflowpath = "/var/lib/cwl/workflow/%s" % workflowname
+        workflowcollection = workflowmapper.mapper(self.tool.tool["id"])[1]
+        workflowcollection = workflowcollection[5:workflowcollection.index('/')]
+        jobpath = "/var/lib/cwl/job/cwl.input.json"
+
+        container_image = arv_docker_get_image(self.arvrunner.api,
+                                               {"dockerImageId": "arvados/jobs"},
+                                               pull_image,
+                                               self.arvrunner.project_uuid)
+
+        return {
+            "command": ["arvados-cwl-runner", "--local", "--api=containers", workflowpath, jobpath],
+            "owner_uuid": self.arvrunner.project_uuid,
+            "name": self.name,
+            "output_path": "/var/spool/cwl",
+            "cwd": "/var/spool/cwl",
+            "priority": 1,
+            "state": "Committed",
+            "container_image": container_image,
+            "mounts": {
+                "/var/lib/cwl/workflow": {
+                    "kind": "collection",
+                    "portable_data_hash": "%s" % workflowcollection
+                },
+                jobpath: {
+                    "kind": "collection",
+                    "portable_data_hash": "%s/cwl.input.json" % jobobj.portable_data_hash()
+                },
+                "stdout": {
+                    "kind": "file",
+                    "path": "/var/spool/cwl/cwl.output.json"
+                },
+                "/var/spool/cwl": {
+                    "kind": "collection",
+                    "writable": True
+                }
+            },
+            "runtime_constraints": {
+                "vcpus": 1,
+                "ram": 1024*1024*256,
+                "API": True
+            }
+        }
+
+    def run(self, *args, **kwargs):
+        kwargs["keepprefix"] = "keep:"
+        job_spec = self.arvados_job_spec(*args, **kwargs)
+        job_spec.setdefault("owner_uuid", self.arvrunner.project_uuid)
+
+        response = self.arvrunner.api.container_requests().create(
+            body=job_spec
+        ).execute(num_retries=self.arvrunner.num_retries)
+
+        self.uuid = response["uuid"]
+        self.arvrunner.processes[response["container_uuid"]] = self
+
+        logger.info("Submitted container %s", response["uuid"])
+
+        if response["state"] in ("Complete", "Failed", "Cancelled"):
+            self.done(response)