19385: Messy work in progress for uploading workflows to collections
[arvados.git] / sdk / cwl / arvados_cwl / arvworkflow.py
index eb78a25fedbd4754752ff8598d7e1faa6b1585db..f66e50dca935870a1f1c88ccc4e4be023c60f9c5 100644 (file)
@@ -2,26 +2,45 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+from past.builtins import basestring
+from future.utils import viewitems
+
 import os
 import json
 import copy
 import logging
+import urllib
+from io import StringIO
+import sys
+
+from typing import (MutableSequence, MutableMapping)
+
+from ruamel.yaml import YAML
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
 
 from schema_salad.sourceline import SourceLine, cmap
+import schema_salad.ref_resolver
+import schema_salad.utils
+
+import arvados.collection
 
 from cwltool.pack import pack
-from cwltool.load_tool import fetch_document
+from cwltool.load_tool import fetch_document, resolve_and_validate_document
 from cwltool.process import shortname
 from cwltool.workflow import Workflow, WorkflowException, WorkflowStep
-from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, visit_class
+from cwltool.utils import adjustFileObjs, adjustDirObjs, visit_class, normalizeFilesDirs
 from cwltool.context import LoadingContext
 
+from schema_salad.ref_resolver import file_uri, uri_file_path
+
 import ruamel.yaml as yaml
 
 from .runner import (upload_dependencies, packed_workflow, upload_workflow_collection,
-                     trim_anonymous_location, remove_redundant_fields, discover_secondary_files)
+                     trim_anonymous_location, remove_redundant_fields, discover_secondary_files,
+                     make_builder, arvados_jobs_image)
 from .pathmapper import ArvPathMapper, trim_listing
-from .arvtool import ArvadosCommandTool, set_cluster_target, make_builder
+from .arvtool import ArvadosCommandTool, set_cluster_target
+from ._version import __version__
+
 from .perf import Perf
 
 logger = logging.getLogger('arvados.cwl-runner')
@@ -30,10 +49,218 @@ metrics = logging.getLogger('arvados.cwl-runner.metrics')
 max_res_pars = ("coresMin", "coresMax", "ramMin", "ramMax", "tmpdirMin", "tmpdirMax")
 sum_res_pars = ("outdirMin", "outdirMax")
 
-def upload_workflow(arvRunner, tool, job_order, project_uuid, uuid=None,
-                    submit_runner_ram=0, name=None, merged_map=None):
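+# Save the packed workflow JSON into a collection, then construct a
+# small v1.2 wrapper Workflow whose single step runs the stored copy
+# by portable data hash.  A sketch of the generated wrapper
+# (illustrative, not verbatim output):
+#
+#   {"cwlVersion": "v1.2",
+#    "$graph": [{"class": "Workflow", "id": "#main",
+#                "inputs": [...], "outputs": [...],
+#                "steps": [{"run": "keep:<pdh>/workflow.json#main", ...}]}]}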
+def make_wrapper_workflow(arvRunner, main, packed, project_uuid, name, git_info, tool):
+    col = arvados.collection.Collection(api_client=arvRunner.api,
+                                        keep_client=arvRunner.keep_client)
+
+    with col.open("workflow.json", "wt") as f:
+        json.dump(packed, f, sort_keys=True, indent=4, separators=(',',': '))
+
+    pdh = col.portable_data_hash()
+
+    toolname = tool.tool.get("label") or tool.metadata.get("label") or os.path.basename(tool.tool["id"])
+    if git_info and git_info.get("http://arvados.org/cwl#gitDescribe"):
+        toolname = "%s (%s)" % (toolname, git_info.get("http://arvados.org/cwl#gitDescribe"))
+
+    existing = arvRunner.api.collections().list(filters=[["portable_data_hash", "=", pdh], ["owner_uuid", "=", project_uuid]]).execute(num_retries=arvRunner.num_retries)
+    if len(existing["items"]) == 0:
+        col.save_new(name=toolname, owner_uuid=project_uuid, ensure_unique_name=True)
+
+    # now construct the wrapper
+
+    step = {
+        "id": "#main/" + toolname,
+        "in": [],
+        "out": [],
+        "run": "keep:%s/workflow.json#main" % pdh,
+        "label": name
+    }
+
+    newinputs = []
+    for i in main["inputs"]:
+        inp = {}
+        # Make sure to only copy known fields that are meaningful at
+        # the workflow level. In practice this ensures that if we're
+        # wrapping a CommandLineTool we don't grab inputBinding.
+        # Right now this also excludes extension fields, which is
+        # fine; Arvados doesn't currently look for any extension
+        # fields on input parameters.
+        for f in ("type", "label", "secondaryFiles", "streamable",
+                  "doc", "id", "format", "loadContents",
+                  "loadListing", "default"):
+            if f in i:
+                inp[f] = i[f]
+        newinputs.append(inp)
+
+    wrapper = {
+        "class": "Workflow",
+        "id": "#main",
+        "inputs": newinputs,
+        "outputs": [],
+        "steps": [step]
+    }
+
+    for i in main["inputs"]:
+        step["in"].append({
+            "id": "#main/step/%s" % shortname(i["id"]),
+            "source": i["id"]
+        })
+
+    for i in main["outputs"]:
+        step["out"].append({"id": "#main/step/%s" % shortname(i["id"])})
+        wrapper["outputs"].append({"outputSource": "#main/step/%s" % shortname(i["id"]),
+                                   "type": i["type"],
+                                   "id": i["id"]})
+
+    wrapper["requirements"] = [{"class": "SubworkflowFeatureRequirement"}]
+
+    if main.get("requirements"):
+        wrapper["requirements"].extend(main["requirements"])
+    if main.get("hints"):
+        wrapper["hints"] = main["hints"]
+
+    doc = {"cwlVersion": "v1.2", "$graph": [wrapper]}
+
+    if git_info:
+        for g in git_info:
+            doc[g] = git_info[g]
+
+    return json.dumps(doc, sort_keys=True, indent=4, separators=(',',': '))
+
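+# Rewrite an absolute reference as a path relative to baseuri,
+# preferring any replacement recorded in merged_map.  For example
+# (illustrative paths): against baseuri file:///work/main.cwl, the
+# reference file:///work/tools/clean.cwl becomes tools/clean.cwl.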
+def rel_ref(s, baseuri, urlexpander, merged_map):
+    uri = urlexpander(s, baseuri)
+    if baseuri in merged_map:
+        replacements = merged_map[baseuri].resolved
+        if uri in replacements:
+            return replacements[uri]
+
+    p1 = os.path.dirname(uri_file_path(baseuri))
+    p2 = os.path.dirname(uri_file_path(uri))
+    p3 = os.path.basename(uri_file_path(uri))
+    r = os.path.relpath(p2, p1)
+    if r == ".":
+        r = ""
+    print("AAA", uri, s)
+    print("BBBB", p1, p2, p3, r)
+    return os.path.join(r, p3)
+
+
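+# Walk the parsed YAML document, tracking the scoped base URI as "id"
+# fields are encountered, and rewrite $include, $import, location and
+# run fields (plus $schemas entries) to relative references so the
+# source files can be stored together in a collection.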
+def update_refs(d, baseuri, urlexpander, merged_map, set_block_style):
+    if isinstance(d, CommentedSeq):
+        if set_block_style:
+            d.fa.set_block_style()
+        for s in d:
+            update_refs(s, baseuri, urlexpander, merged_map, set_block_style)
+    elif isinstance(d, CommentedMap):
+        if set_block_style:
+            d.fa.set_block_style()
+
+        if "id" in d:
+            baseuri = urlexpander(d["id"], baseuri, scoped_id=True)
+
+        for field in ("$include", "$import", "location", "run"):
+            if field in d and isinstance(d[field], str):
+                d[field] = rel_ref(d[field], baseuri, urlexpander, merged_map)
+
+        if "$schemas" in d:
+            for n, s in enumerate(d["$schemas"]):
+                d["$schemas"][n] = rel_ref(s, baseuri, urlexpander, merged_map)
+
+        for key in d:
+            update_refs(d[key], baseuri, urlexpander, merged_map, set_block_style)
+
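+# Work in progress: rather than packing the workflow into a single
+# JSON document, upload the original source files, with references
+# rewritten to relative paths, so the workflow stays readable and
+# editable in the collection.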
+def new_upload_workflow(arvRunner, tool, job_order, project_uuid,
+                    runtimeContext, uuid=None,
+                    submit_runner_ram=0, name=None, merged_map=None,
+                    submit_runner_image=None,
+                    git_info=None):
+
+    firstfile = None
+    workflow_files = set()
+    import_files = set()
+    include_files = set()
+
+    for w in tool.doc_loader.idx:
+        if w.startswith("file://"):
+            workflow_files.add(urllib.parse.urldefrag(w)[0])
+            if firstfile is None:
+                firstfile = urllib.parse.urldefrag(w)[0]
+        if w.startswith("import:file://"):
+            import_files.add(urllib.parse.urldefrag(w[7:])[0])
+        if w.startswith("include:file://"):
+            include_files.add(urllib.parse.urldefrag(w[8:])[0])
+
+    all_files = workflow_files | import_files | include_files
+
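+    # Find the longest common prefix of all the source file paths,
+    # starting at index 7 to skip the leading "file://", then back up
+    # to the nearest "/" so the prefix ends on a directory boundary.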
+    n = 7
+    allmatch = True
+    while allmatch:
+        n += 1
+        for f in all_files:
+            if len(f)-1 < n:
+                n -= 1
+                allmatch = False
+                break
+            if f[n] != firstfile[n]:
+                allmatch = False
+                break
+
+    while firstfile[n] != "/":
+        n -= 1
+
+    prefix = firstfile[:n+1]
+
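+    # Stage the files into a new collection, preserving their layout
+    # below the common prefix.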
+    col = arvados.collection.Collection()
+
+    for w in workflow_files | import_files:
+        # 1. load YAML
+
+        text = tool.doc_loader.fetch_text(w)
+        if isinstance(text, bytes):
+            textIO = StringIO(text.decode('utf-8'))
+        else:
+            textIO = StringIO(text)
+
+        yamlloader = schema_salad.utils.yaml_no_ts()
+        result = yamlloader.load(textIO)
+
+        set_block_style = False
+        if result.fa.flow_style():
+            set_block_style = True
+
+        # 2. find $import, $include, $schemas, run, and location fields
+        # 3. rewrite each reference relative to this file
+        update_refs(result, w, tool.doc_loader.expand_url, merged_map, set_block_style)
+
+        with col.open(w[n+1:], "wt") as f:
+            yamlloader.dump(result, stream=f)
+
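+    # $include'd files may be arbitrary text or binary; copy them
+    # into the collection verbatim in 64 KiB chunks.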
+    for w in include_files:
+        with col.open(w[n+1:], "wb") as f1:
+            with open(uri_file_path(w), "rb") as f2:
+                dat = f2.read(65536)
+                while dat:
+                    f1.write(dat)
+                    dat = f2.read(65536)
+
+    toolname = tool.tool.get("label") or tool.metadata.get("label") or os.path.basename(tool.tool["id"])
+    if git_info and git_info.get("http://arvados.org/cwl#gitDescribe"):
+        toolname = "%s (%s)" % (toolname, git_info.get("http://arvados.org/cwl#gitDescribe"))
+
+    col.save_new(name=toolname, owner_uuid=arvRunner.project_uuid, ensure_unique_name=True)
+
+    return col.manifest_locator()
+
 
-    packed = packed_workflow(arvRunner, tool, merged_map)
+def upload_workflow(arvRunner, tool, job_order, project_uuid,
+                    runtimeContext, uuid=None,
+                    submit_runner_ram=0, name=None, merged_map=None,
+                    submit_runner_image=None,
+                    git_info=None):
+
+    packed = packed_workflow(arvRunner, tool, merged_map, runtimeContext, git_info)
 
     adjustDirObjs(job_order, trim_listing)
     adjustFileObjs(job_order, trim_anonymous_location)
@@ -49,26 +276,38 @@ def upload_workflow(arvRunner, tool, job_order, project_uuid, uuid=None,
         name = tool.tool.get("label", os.path.basename(tool.tool["id"]))
 
     upload_dependencies(arvRunner, name, tool.doc_loader,
-                        packed, tool.tool["id"], False)
+                        packed, tool.tool["id"],
+                        runtimeContext)
+
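+    # Ensure the workflow carries a WorkflowRunnerResources hint
+    # recording the arvados/jobs image (and, below, any RAM override)
+    # used to launch the workflow runner container.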
+    wf_runner_resources = None
+
+    hints = main.get("hints", [])
+    found = False
+    for h in hints:
+        if h["class"] == "http://arvados.org/cwl#WorkflowRunnerResources":
+            wf_runner_resources = h
+            found = True
+            break
+    if not found:
+        wf_runner_resources = {"class": "http://arvados.org/cwl#WorkflowRunnerResources"}
+        hints.append(wf_runner_resources)
+
+    wf_runner_resources["acrContainerImage"] = arvados_jobs_image(arvRunner,
+                                                                  submit_runner_image or "arvados/jobs:"+__version__,
+                                                                  runtimeContext)
 
     if submit_runner_ram:
-        hints = main.get("hints", [])
-        found = False
-        for h in hints:
-            if h["class"] == "http://arvados.org/cwl#WorkflowRunnerResources":
-                h["ramMin"] = submit_runner_ram
-                found = True
-                break
-        if not found:
-            hints.append({"class": "http://arvados.org/cwl#WorkflowRunnerResources",
-                          "ramMin": submit_runner_ram})
-        main["hints"] = hints
+        wf_runner_resources["ramMin"] = submit_runner_ram
+
+    main["hints"] = hints
+
+    wrapper = make_wrapper_workflow(arvRunner, main, packed, project_uuid, name, git_info, tool)
 
     body = {
         "workflow": {
             "name": name,
             "description": tool.tool.get("doc", ""),
-            "definition":json.dumps(packed, sort_keys=True, indent=4, separators=(',',': '))
+            "definition": wrapper
         }}
     if project_uuid:
         body["workflow"]["owner_uuid"] = project_uuid
@@ -127,15 +366,21 @@ class ArvadosWorkflowStep(WorkflowStep):
                  **argv
                 ):  # type: (...) -> None
 
-        super(ArvadosWorkflowStep, self).__init__(toolpath_object, pos, loadingContext, *argc, **argv)
-        self.tool["class"] = "WorkflowStep"
+        if arvrunner.fast_submit:
+            self.tool = toolpath_object
+            self.tool["inputs"] = []
+            self.tool["outputs"] = []
+        else:
+            super(ArvadosWorkflowStep, self).__init__(toolpath_object, pos, loadingContext, *argc, **argv)
+            self.tool["class"] = "WorkflowStep"
         self.arvrunner = arvrunner
 
     def job(self, joborder, output_callback, runtimeContext):
         runtimeContext = runtimeContext.copy()
         runtimeContext.toplevel = True  # Preserve behavior for #13365
 
-        builder = make_builder({shortname(k): v for k,v in joborder.items()}, self.hints, self.requirements, runtimeContext)
+        builder = make_builder({shortname(k): v for k,v in viewitems(joborder)}, self.hints, self.requirements,
+                               runtimeContext, self.metadata)
         runtimeContext = set_cluster_target(self.tool, self.arvrunner, builder, runtimeContext)
         return super(ArvadosWorkflowStep, self).job(joborder, output_callback, runtimeContext)
 
@@ -155,7 +400,7 @@ class ArvadosWorkflow(Workflow):
 
     def job(self, joborder, output_callback, runtimeContext):
 
-        builder = make_builder(joborder, self.hints, self.requirements, runtimeContext)
+        builder = make_builder(joborder, self.hints, self.requirements, runtimeContext, self.metadata)
         runtimeContext = set_cluster_target(self.tool, self.arvrunner, builder, runtimeContext)
 
         req, _ = self.get_requirement("http://arvados.org/cwl#RunInSingleContainer")
@@ -167,25 +412,30 @@ class ArvadosWorkflow(Workflow):
         with SourceLine(self.tool, None, WorkflowException, logger.isEnabledFor(logging.DEBUG)):
             if "id" not in self.tool:
                 raise WorkflowException("%s object must have 'id'" % (self.tool["class"]))
-        document_loader, workflowobj, uri = (self.doc_loader, self.doc_loader.fetch(self.tool["id"]), self.tool["id"])
 
-        discover_secondary_files(self.tool["inputs"], joborder)
+        discover_secondary_files(self.arvrunner.fs_access, builder,
+                                 self.tool["inputs"], joborder)
+        normalizeFilesDirs(joborder)
 
         with Perf(metrics, "subworkflow upload_deps"):
             upload_dependencies(self.arvrunner,
                                 os.path.basename(joborder.get("id", "#")),
-                                document_loader,
+                                self.doc_loader,
                                 joborder,
                                 joborder.get("id", "#"),
-                                False)
+                                runtimeContext)
 
             if self.wf_pdh is None:
-                workflowobj["requirements"] = dedup_reqs(self.requirements)
-                workflowobj["hints"] = dedup_reqs(self.hints)
+                packed = pack(self.loadingContext, self.tool["id"], loader=self.doc_loader)
 
-                packed = pack(document_loader, workflowobj, uri, self.metadata)
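+                # Copy this workflow's deduplicated requirements and
+                # hints onto the packed #main process.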
+                for p in packed["$graph"]:
+                    if p["id"] == "#main":
+                        p["requirements"] = dedup_reqs(self.requirements)
+                        p["hints"] = dedup_reqs(self.hints)
 
                 def visit(item):
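+                    # The subworkflow runs inside a single container,
+                    # so drop per-step DockerRequirements in favor of
+                    # the top-level image.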
+                    if "requirements" in item:
+                        item["requirements"] = [i for i in item["requirements"] if i["class"] != "DockerRequirement"]
                     for t in ("hints", "requirements"):
                         if t not in item:
                             continue
@@ -213,10 +463,10 @@ class ArvadosWorkflow(Workflow):
 
                 upload_dependencies(self.arvrunner,
                                     runtimeContext.name,
-                                    document_loader,
+                                    self.doc_loader,
                                     packed,
-                                    uri,
-                                    False)
+                                    self.tool["id"],
+                                    runtimeContext)
 
                 # Discover files/directories referenced by the
                 # workflow (mainly "default" values)
@@ -280,7 +530,21 @@ class ArvadosWorkflow(Workflow):
             if self.wf_pdh is None:
                 adjustFileObjs(packed, keepmount)
                 adjustDirObjs(packed, keepmount)
-                self.wf_pdh = upload_workflow_collection(self.arvrunner, shortname(self.tool["id"]), packed)
+                self.wf_pdh = upload_workflow_collection(self.arvrunner, shortname(self.tool["id"]), packed, runtimeContext)
+
+        self.loadingContext = self.loadingContext.copy()
+        self.loadingContext.metadata = self.loadingContext.metadata.copy()
+        self.loadingContext.metadata["http://commonwl.org/cwltool#original_cwlVersion"] = "v1.0"
+
+        if len(job_res_reqs) == 1:
+            # RAM request needs to be at least 128 MiB or the workflow
+            # runner itself won't run reliably.
+            if job_res_reqs[0].get("ramMin", 1024) < 128:
+                job_res_reqs[0]["ramMin"] = 128
+
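+        # Run cwltool inside the runner container without nested
+        # containers (--no-container), moving outputs into the output
+        # directory and preserving the container environment.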
+        arguments = ["--no-container", "--move-outputs", "--preserve-entire-environment", "workflow.cwl", "cwl.input.yml"]
+        if runtimeContext.debug:
+            arguments.insert(0, '--debug')
 
         wf_runner = cmap({
             "class": "CommandLineTool",
@@ -301,7 +565,7 @@ class ArvadosWorkflow(Workflow):
                     }]
             }],
             "hints": self.hints,
-            "arguments": ["--no-container", "--move-outputs", "--preserve-entire-environment", "workflow.cwl#main", "cwl.input.yml"],
+            "arguments": arguments,
             "id": "#"
         })
         return ArvadosCommandTool(self.arvrunner, wf_runner, self.loadingContext).job(joborder_resolved, output_callback, runtimeContext)