X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/6c43be47cb3756a0e6ffc924572259d1a1c8f2c3..a605fc29fd7f79b2882625c99a72e998157fa5bc:/sdk/cwl/arvados_cwl/runner.py

diff --git a/sdk/cwl/arvados_cwl/runner.py b/sdk/cwl/arvados_cwl/runner.py
index 0cc23ab459..e5b4e006e8 100644
--- a/sdk/cwl/arvados_cwl/runner.py
+++ b/sdk/cwl/arvados_cwl/runner.py
@@ -1,73 +1,152 @@
 import os
 import urlparse
 from functools import partial
+import logging
+import json
+import re
+from cStringIO import StringIO
+
+import cwltool.draft2tool
 from cwltool.draft2tool import CommandLineTool
 import cwltool.workflow
-from cwltool.process import get_feature, scandeps, adjustFiles
+from cwltool.process import get_feature, scandeps, UnsupportedRequirement, normalizeFilesDirs
 from cwltool.load_tool import fetch_document
+from cwltool.pathmapper import adjustFileObjs, adjustDirObjs
+
+import arvados.collection
+import ruamel.yaml as yaml
 
 from .arvdocker import arv_docker_get_image
 from .pathmapper import ArvPathMapper
 
+logger = logging.getLogger('arvados.cwl-runner')
+
+cwltool.draft2tool.ACCEPTLIST_RE = re.compile(r".*")
+
+def trim_listing(obj):
+    """Remove 'listing' field from Directory objects that are keep references.
+
+    When Directory objects represent Keep references, it is redundant and
+    potentially very expensive to pass fully enumerated Directory objects
+    between instances of cwl-runner (e.g. submitting a job, or using the
+    RunInSingleContainer feature), so delete the 'listing' field when it is
+    safe to do so.
+    """
+
+    if obj.get("location", "").startswith("keep:") and "listing" in obj:
+        del obj["listing"]
+    if obj.get("location", "").startswith("_:"):
+        del obj["location"]
+
+def upload_dependencies(arvrunner, name, document_loader,
+                        workflowobj, uri, loadref_run):
+    """Upload the dependencies of the workflowobj document to Keep.
+
+    Returns a pathmapper object mapping local paths to keep references. Also
+    does an in-place update of references in "workflowobj".
+
+    Use scandeps to find $import, $include, $schemas, run, File and Directory
+    fields that represent external references.
+
+    If workflowobj has an "id" field, this will reload the document to ensure
+    it is scanning the raw document prior to preprocessing.
+    """
+
+    loaded = set()
+    def loadref(b, u):
+        joined = urlparse.urljoin(b, u)
+        defrg, _ = urlparse.urldefrag(joined)
+        if defrg not in loaded:
+            loaded.add(defrg)
+            # Use fetch_text to get raw file (before preprocessing).
+            text = document_loader.fetch_text(defrg)
+            if isinstance(text, bytes):
+                textIO = StringIO(text.decode('utf-8'))
+            else:
+                textIO = StringIO(text)
+            return yaml.safe_load(textIO)
+        else:
+            return {}
+
+    if loadref_run:
+        loadref_fields = set(("$import", "run"))
+    else:
+        loadref_fields = set(("$import",))
+
+    scanobj = workflowobj
+    if "id" in workflowobj:
+        # Need raw file content (before preprocessing) to ensure
+        # that external references in $include and $mixin are captured.
+        scanobj = loadref("", workflowobj["id"])
+
+    sc = scandeps(uri, scanobj,
+                  loadref_fields,
+                  set(("$include", "$schemas", "location")),
+                  loadref)
+
+    normalizeFilesDirs(sc)
+
+    if "id" in workflowobj:
+        sc.append({"class": "File", "location": workflowobj["id"]})
+
+    mapper = ArvPathMapper(arvrunner, sc, "",
+                           "keep:%s",
+                           "keep:%s/%s",
+                           name=name)
+
+    def setloc(p):
+        if "location" in p and (not p["location"].startswith("_:")) and (not p["location"].startswith("keep:")):
+            p["location"] = mapper.mapper(p["location"]).resolved
+    adjustFileObjs(workflowobj, setloc)
+    adjustDirObjs(workflowobj, setloc)
+
+    return mapper
+
+
+def upload_docker(arvrunner, tool):
+    if isinstance(tool, CommandLineTool):
+        (docker_req, docker_is_req) = get_feature(tool, "DockerRequirement")
+        if docker_req:
+            arv_docker_get_image(arvrunner.api, docker_req, True, arvrunner.project_uuid)
+    elif isinstance(tool, cwltool.workflow.Workflow):
+        for s in tool.steps:
+            upload_docker(arvrunner, s.embedded_tool)
+
+
 class Runner(object):
-    def __init__(self, runner, tool, job_order, enable_reuse):
+    def __init__(self, runner, tool, job_order, enable_reuse, output_name):
         self.arvrunner = runner
         self.tool = tool
         self.job_order = job_order
         self.running = False
         self.enable_reuse = enable_reuse
+        self.uuid = None
+        self.final_output = None
+        self.output_name = output_name
 
     def update_pipeline_component(self, record):
         pass
 
-    def upload_docker(self, tool):
-        if isinstance(tool, CommandLineTool):
-            (docker_req, docker_is_req) = get_feature(tool, "DockerRequirement")
-            if docker_req:
-                arv_docker_get_image(self.arvrunner.api, docker_req, True, self.arvrunner.project_uuid)
-        elif isinstance(tool, cwltool.workflow.Workflow):
-            for s in tool.steps:
-                self.upload_docker(s.embedded_tool)
-
     def arvados_job_spec(self, *args, **kwargs):
-        self.upload_docker(self.tool)
-
-        workflowfiles = set()
-        jobfiles = set()
-        workflowfiles.add(self.tool.tool["id"])
+        upload_docker(self.arvrunner, self.tool)
 
         self.name = os.path.basename(self.tool.tool["id"])
 
-        def visitFiles(files, path):
-            files.add(path)
-            return path
-
-        document_loader, workflowobj, uri = fetch_document(self.tool.tool["id"])
-        def loadref(b, u):
-            return document_loader.fetch(urlparse.urljoin(b, u))
-
-        sc = scandeps(uri, workflowobj,
-                      set(("$import", "run")),
-                      set(("$include", "$schemas", "path")),
-                      loadref)
-        adjustFiles(sc, partial(visitFiles, workflowfiles))
-        adjustFiles(self.job_order, partial(visitFiles, jobfiles))
-
-        workflowmapper = ArvPathMapper(self.arvrunner, workflowfiles, "",
-                                       "%s",
-                                       "%s/%s",
-                                       name=self.name,
-                                       **kwargs)
+        workflowmapper = upload_dependencies(self.arvrunner,
+                                             self.name,
+                                             self.tool.doc_loader,
+                                             self.tool.tool,
+                                             self.tool.tool["id"],
+                                             True)
 
-        jobmapper = ArvPathMapper(self.arvrunner, jobfiles, "",
-                                  "%s",
-                                  "%s/%s",
-                                  name=os.path.basename(self.job_order.get("id", "#")),
-                                  **kwargs)
+        jobmapper = upload_dependencies(self.arvrunner,
+                                        os.path.basename(self.job_order.get("id", "#")),
+                                        self.tool.doc_loader,
+                                        self.job_order,
+                                        self.job_order.get("id", "#"),
+                                        False)
 
-        adjustFiles(self.job_order, lambda p: jobmapper.mapper(p)[1])
+        adjustDirObjs(self.job_order, trim_listing)
 
         if "id" in self.job_order:
             del self.job_order["id"]
@@ -77,22 +156,36 @@ class Runner(object):
 
     def done(self, record):
         if record["state"] == "Complete":
-            processStatus = "success"
+            if record.get("exit_code") is not None:
+                if record["exit_code"] == 33:
+                    processStatus = "UnsupportedRequirement"
+                elif record["exit_code"] == 0:
+                    processStatus = "success"
+                else:
+                    processStatus = "permanentFail"
+            else:
+                processStatus = "success"
         else:
             processStatus = "permanentFail"
 
         outputs = None
         try:
             try:
-                outc = arvados.collection.Collection(record["output"])
+                self.final_output = record["output"]
+                outc = arvados.collection.CollectionReader(self.final_output,
+                                                           api_client=self.arvrunner.api,
+                                                           keep_client=self.arvrunner.keep_client,
+                                                           num_retries=self.arvrunner.num_retries)
                 with outc.open("cwl.output.json") as f:
                     outputs = json.load(f)
-                def keepify(path):
+                def keepify(fileobj):
+                    path = fileobj["location"]
                     if not path.startswith("keep:"):
-                        return "keep:%s/%s" % (record["output"], path)
-                adjustFiles(outputs, keepify)
+                        fileobj["location"] = "keep:%s/%s" % (record["output"], path)
+                adjustFileObjs(outputs, keepify)
+                adjustDirObjs(outputs, keepify)
             except Exception as e:
                 logger.error("While getting final output object: %s", e)
             self.arvrunner.output_callback(outputs, processStatus)
         finally:
-            del self.arvrunner.jobs[record["uuid"]]
+            del self.arvrunner.processes[record["uuid"]]
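
A minimal sketch (not part of the patch) of what the new trim_listing helper does when applied through adjustDirObjs, as arvados_job_spec does above. It assumes arvados_cwl and its cwltool dependency are importable; the Keep locator is a made-up example.

    from cwltool.pathmapper import adjustDirObjs
    from arvados_cwl.runner import trim_listing

    job_order = {
        "inp": {
            "class": "Directory",
            # Hypothetical Keep reference (made-up locator).
            "location": "keep:d41d8cd98f00b204e9800998ecf8427e+0/inputs",
            # Redundant for keep: locations, since Keep can re-enumerate
            # the directory contents on demand.
            "listing": [{"class": "File",
                         "location": "keep:d41d8cd98f00b204e9800998ecf8427e+0/inputs/a.txt"}],
        }
    }

    # Walk every Directory object in the structure and drop the
    # "listing" field from those backed by keep: references.
    adjustDirObjs(job_order, trim_listing)

    assert "listing" not in job_order["inp"]  # listing dropped, location kept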