X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/0561bd0c3c07257fd58ded6c7cfa5feeae97af57..540ecd0ae604df1cf02a63515e6e9e8e04e6e64a:/sdk/cwl/arvados_cwl/runner.py

diff --git a/sdk/cwl/arvados_cwl/runner.py b/sdk/cwl/arvados_cwl/runner.py
index 683f548c48..053c99502b 100644
--- a/sdk/cwl/arvados_cwl/runner.py
+++ b/sdk/cwl/arvados_cwl/runner.py
@@ -13,12 +13,11 @@ from StringIO import StringIO
 
 from schema_salad.sourceline import SourceLine
 
-import cwltool.draft2tool
-from cwltool.draft2tool import CommandLineTool
+from cwltool.command_line_tool import CommandLineTool
 import cwltool.workflow
 from cwltool.process import get_feature, scandeps, UnsupportedRequirement, normalizeFilesDirs, shortname
 from cwltool.load_tool import fetch_document
-from cwltool.pathmapper import adjustFileObjs, adjustDirObjs
+from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, visit_class
 from cwltool.utils import aslist
 from cwltool.builder import substitute
 from cwltool.pack import pack
@@ -46,6 +45,22 @@ def trim_anonymous_location(obj):
     if obj.get("location", "").startswith("_:"):
         del obj["location"]
 
+def remove_redundant_fields(obj):
+    for field in ("path", "nameext", "nameroot", "dirname"):
+        if field in obj:
+            del obj[field]
+
+def find_defaults(d, op):
+    if isinstance(d, list):
+        for i in d:
+            find_defaults(i, op)
+    elif isinstance(d, dict):
+        if "default" in d:
+            op(d)
+        else:
+            for i in d.itervalues():
+                find_defaults(i, op)
+
 def upload_dependencies(arvrunner, name, document_loader,
                         workflowobj, uri, loadref_run, include_primary=True):
     """Upload the dependencies of the workflowobj document to Keep.
@@ -101,6 +116,23 @@ def upload_dependencies(arvrunner, name, document_loader,
         for s in workflowobj["$schemas"]:
             sc.append({"class": "File", "location": s})
 
+    def capture_default(obj):
+        remove = [False]
+        def add_default(f):
+            if "location" not in f and "path" in f:
+                f["location"] = f["path"]
+                del f["path"]
+            if "location" in f and not arvrunner.fs_access.exists(f["location"]):
+                # Remove from sc
+                sc[:] = [x for x in sc if x["location"] != f["location"]]
+                # Delete "default" from workflowobj
+                remove[0] = True
+        visit_class(obj["default"], ("File", "Directory"), add_default)
+        if remove[0]:
+            del obj["default"]
+
+    find_defaults(workflowobj, capture_default)
+
     mapper = ArvPathMapper(arvrunner, sc, "",
                            "keep:%s",
                            "keep:%s/%s",
@@ -128,22 +160,43 @@ def upload_docker(arvrunner, tool):
     if isinstance(tool, CommandLineTool):
         (docker_req, docker_is_req) = get_feature(tool, "DockerRequirement")
         if docker_req:
-            if docker_req.get("dockerOutputDirectory"):
+            if docker_req.get("dockerOutputDirectory") and arvrunner.work_api != "containers":
                 # TODO: can be supported by containers API, but not jobs API.
                 raise SourceLine(docker_req, "dockerOutputDirectory", UnsupportedRequirement).makeError(
                     "Option 'dockerOutputDirectory' of DockerRequirement not supported.")
             arv_docker_get_image(arvrunner.api, docker_req, True, arvrunner.project_uuid)
+        else:
+            arv_docker_get_image(arvrunner.api, {"dockerPull": "arvados/jobs"}, True, arvrunner.project_uuid)
     elif isinstance(tool, cwltool.workflow.Workflow):
         for s in tool.steps:
             upload_docker(arvrunner, s.embedded_tool)
 
-def packed_workflow(arvrunner, tool):
+def packed_workflow(arvrunner, tool, merged_map):
     """Create a packed workflow.
A "packed" workflow is one where all the components have been combined into a single document.""" - return pack(tool.doc_loader, tool.doc_loader.fetch(tool.tool["id"]), - tool.tool["id"], tool.metadata) + rewrites = {} + packed = pack(tool.doc_loader, tool.doc_loader.fetch(tool.tool["id"]), + tool.tool["id"], tool.metadata, rewrite_out=rewrites) + + rewrite_to_orig = {} + for k,v in rewrites.items(): + rewrite_to_orig[v] = k + + def visit(v, cur_id): + if isinstance(v, dict): + if v.get("class") in ("CommandLineTool", "Workflow"): + cur_id = rewrite_to_orig.get(v["id"], v["id"]) + if "location" in v and not v["location"].startswith("keep:"): + v["location"] = merged_map[cur_id][v["location"]] + for l in v: + visit(v[l], cur_id) + if isinstance(v, list): + for l in v: + visit(l, cur_id) + visit(packed, None) + return packed def tag_git_version(packed): if tool.tool["id"].startswith("file://"): @@ -156,12 +209,8 @@ def tag_git_version(packed): packed["http://schema.org/version"] = githash -def upload_job_order(arvrunner, name, tool, job_order): - """Upload local files referenced in the input object and return updated input - object with 'location' updated to the proper keep references. - """ - - for t in tool.tool["inputs"]: +def discover_secondary_files(inputs, job_order): + for t in inputs: def setSecondary(fileobj): if isinstance(fileobj, dict) and fileobj.get("class") == "File": if "secondaryFiles" not in fileobj: @@ -174,6 +223,13 @@ def upload_job_order(arvrunner, name, tool, job_order): if shortname(t["id"]) in job_order and t.get("secondaryFiles"): setSecondary(job_order[shortname(t["id"])]) +def upload_job_order(arvrunner, name, tool, job_order): + """Upload local files referenced in the input object and return updated input + object with 'location' updated to the proper keep references. + """ + + discover_secondary_files(tool.tool["inputs"], job_order) + jobmapper = upload_dependencies(arvrunner, name, tool.doc_loader, @@ -191,16 +247,18 @@ def upload_job_order(arvrunner, name, tool, job_order): return job_order -def upload_workflow_deps(arvrunner, tool, override_tools): +def upload_workflow_deps(arvrunner, tool): # Ensure that Docker images needed by this workflow are available upload_docker(arvrunner, tool) document_loader = tool.doc_loader + merged_map = {} + def upload_tool_deps(deptool): if "id" in deptool: - upload_dependencies(arvrunner, + pm = upload_dependencies(arvrunner, "%s dependencies" % (shortname(deptool["id"])), document_loader, deptool, @@ -208,10 +266,15 @@ def upload_workflow_deps(arvrunner, tool, override_tools): False, include_primary=False) document_loader.idx[deptool["id"]] = deptool - override_tools[deptool["id"]] = json.dumps(deptool) + toolmap = {} + for k,v in pm.items(): + toolmap[k] = v.resolved + merged_map[deptool["id"]] = toolmap tool.visit(upload_tool_deps) + return merged_map + def arvados_jobs_image(arvrunner, img): """Determine if the right arvados/jobs image version is available. If not, try to pull and upload it.""" @@ -253,11 +316,18 @@ class Runner(object): def __init__(self, runner, tool, job_order, enable_reuse, output_name, output_tags, submit_runner_ram=0, name=None, on_error=None, submit_runner_image=None, - intermediate_output_ttl=0): + intermediate_output_ttl=0, merged_map=None, priority=None, + secret_store=None): self.arvrunner = runner self.tool = tool self.job_order = job_order self.running = False + if enable_reuse: + # If reuse is permitted by command line arguments but + # disabled by the workflow itself, disable it. 
+            reuse_req, _ = get_feature(self.tool, "http://arvados.org/cwl#ReuseRequirement")
+            if reuse_req:
+                enable_reuse = reuse_req["enableReuse"]
         self.enable_reuse = enable_reuse
         self.uuid = None
         self.final_output = None
@@ -267,6 +337,8 @@ class Runner(object):
         self.on_error = on_error
         self.jobs_image = submit_runner_image or "arvados/jobs:"+__version__
         self.intermediate_output_ttl = intermediate_output_ttl
+        self.priority = priority
+        self.secret_store = secret_store
 
         if submit_runner_ram:
             self.submit_runner_ram = submit_runner_ram
@@ -276,6 +348,8 @@ class Runner(object):
         if self.submit_runner_ram <= 0:
             raise Exception("Value of --submit-runner-ram must be greater than zero")
 
+        self.merged_map = merged_map or {}
+
     def update_pipeline_component(self, record):
         pass
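
The new find_defaults helper walks a parsed CWL document and applies a callback to every mapping that declares a "default"; upload_dependencies uses it (via capture_default) to normalize or drop defaults that point at files missing from the filesystem. A minimal sketch of that traversal, assuming Python 2 (the file still imports StringIO); the sample workflowobj fragment and the normalize_default callback are hypothetical:

# Sketch only: find_defaults is copied from the diff above; the sample
# document and callback are illustrative, not part of arvados-cwl-runner.

def find_defaults(d, op):
    # Depth-first walk; invoke op() on any dict carrying a "default".
    if isinstance(d, list):
        for i in d:
            find_defaults(i, op)
    elif isinstance(d, dict):
        if "default" in d:
            op(d)
        else:
            for i in d.itervalues():   # Python 2, as in the source
                find_defaults(i, op)

workflowobj = {"inputs": [
    {"id": "#main/ref", "type": "File",
     "default": {"class": "File", "path": "/fixtures/ref.fa"}},
    {"id": "#main/threads", "type": "int"},
]}

def normalize_default(obj):
    # Mirrors the first step of capture_default: a default File with a
    # bare "path" is given a "location" instead.
    f = obj["default"]
    if "location" not in f and "path" in f:
        f["location"] = f.pop("path")

find_defaults(workflowobj, normalize_default)
print workflowobj["inputs"][0]["default"]
# -> {'class': 'File', 'location': '/fixtures/ref.fa'}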
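packed_workflow now takes the merged_map produced by upload_workflow_deps and uses it, together with the id-rewrite table that pack() reports through rewrite_out, to rewrite every non-keep: file location in the packed document to its uploaded Keep URI. The expected call sequence, as a hedged sketch (the arvrunner and tool objects come from the surrounding runner code; the caller shown here is assumed, not part of this diff):

# Assumed wiring in a submit-side caller (not shown in this diff).

# 1. Upload each step tool's dependencies to Keep.  The return value
#    maps each tool id to {original location -> "keep:..." location}.
merged_map = upload_workflow_deps(arvrunner, tool)

# 2. Pack the workflow into one document.  packed_workflow() walks the
#    packed tree, tracking each embedded tool's original id via the
#    rewrite_out table, and rewrites any remaining local "location"
#    through merged_map[cur_id].
packed = packed_workflow(arvrunner, tool, merged_map)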
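The ReuseRequirement check added to Runner.__init__ lets a workflow disable job reuse even when reuse is enabled on the command line; enable_reuse is only ever narrowed, never turned back on. A hypothetical illustration of that logic with the hint already parsed into a dict (the exact shape get_feature returns for this extension is an assumption here):

# Hypothetical: what get_feature(self.tool,
# "http://arvados.org/cwl#ReuseRequirement") might hand back if the
# workflow carries the hint.
reuse_req = {"class": "http://arvados.org/cwl#ReuseRequirement",
             "enableReuse": False}

enable_reuse = True          # e.g. reuse permitted on the command line
if enable_reuse:
    if reuse_req:
        enable_reuse = reuse_req["enableReuse"]

assert enable_reuse is False  # the workflow's opt-out wins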