Update storage_classes support for arvados_cwl_runner to work correctly
diff --git a/sdk/cwl/arvados_cwl/runner.py b/sdk/cwl/arvados_cwl/runner.py
index fb5d036e941969df71b6a3062d09bb87d4328739..f907d33951c45a5d707bb15dd18c9154ae1b5bad 100644
--- a/sdk/cwl/arvados_cwl/runner.py
+++ b/sdk/cwl/arvados_cwl/runner.py
@@ -8,13 +8,13 @@ from functools import partial
 import logging
 import json
 import subprocess
+from collections import namedtuple
 
 from StringIO import StringIO
 
-from schema_salad.sourceline import SourceLine
+from schema_salad.sourceline import SourceLine, cmap
 
-import cwltool.draft2tool
-from cwltool.draft2tool import CommandLineTool
+from cwltool.command_line_tool import CommandLineTool
 import cwltool.workflow
 from cwltool.process import get_feature, scandeps, UnsupportedRequirement, normalizeFilesDirs, shortname
 from cwltool.load_tool import fetch_document
@@ -46,11 +46,13 @@ def trim_anonymous_location(obj):
     if obj.get("location", "").startswith("_:"):
         del obj["location"]
 
+
 def remove_redundant_fields(obj):
     for field in ("path", "nameext", "nameroot", "dirname"):
         if field in obj:
             del obj[field]
 
+
 def find_defaults(d, op):
     if isinstance(d, list):
         for i in d:
@@ -62,8 +64,25 @@ def find_defaults(d, op):
             for i in d.itervalues():
                 find_defaults(i, op)
 
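+# Given an input parameter schema "t" that declares secondaryFiles patterns,
+# fill in any missing "secondaryFiles" on the matching File object(s) and
+# record what was added in "discovered", keyed by the primary file's location.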
+def setSecondary(t, fileobj, discovered):
+    if isinstance(fileobj, dict) and fileobj.get("class") == "File":
+        if "secondaryFiles" not in fileobj:
+            fileobj["secondaryFiles"] = cmap([{"location": substitute(fileobj["location"], sf), "class": "File"} for sf in t["secondaryFiles"]])
+            if discovered is not None:
+                discovered[fileobj["location"]] = fileobj["secondaryFiles"]
+    elif isinstance(fileobj, list):
+        for e in fileobj:
+            setSecondary(t, e, discovered)
+
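+# Apply setSecondary to every input parameter that declares secondaryFiles
+# and has a corresponding value in the job order.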
+def discover_secondary_files(inputs, job_order, discovered=None):
+    for t in inputs:
+        if shortname(t["id"]) in job_order and t.get("secondaryFiles"):
+            setSecondary(t, job_order[shortname(t["id"])], discovered)
+
+
 def upload_dependencies(arvrunner, name, document_loader,
-                        workflowobj, uri, loadref_run, include_primary=True):
+                        workflowobj, uri, loadref_run,
+                        include_primary=True, discovered_secondaryfiles=None):
     """Upload the dependencies of the workflowobj document to Keep.
 
     Returns a pathmapper object mapping local paths to keep references.  Also
@@ -103,11 +122,18 @@ def upload_dependencies(arvrunner, name, document_loader,
         # that external references in $include and $mixin are captured.
         scanobj = loadref("", workflowobj["id"])
 
-    sc = scandeps(uri, scanobj,
+    sc_result = scandeps(uri, scanobj,
                   loadref_fields,
                   set(("$include", "$schemas", "location")),
                   loadref, urljoin=document_loader.fetcher.urljoin)
 
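+    # Only local ("file:") references need to be uploaded; drop anything
+    # already in Keep or otherwise non-local from the dependency list.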
+    sc = []
+    def only_real(obj):
+        if obj.get("location", "").startswith("file:"):
+            sc.append(obj)
+
+    visit_class(sc_result, ("File", "Directory"), only_real)
+
     normalizeFilesDirs(sc)
 
     if include_primary and "id" in workflowobj:
@@ -117,22 +143,33 @@ def upload_dependencies(arvrunner, name, document_loader,
         for s in workflowobj["$schemas"]:
             sc.append({"class": "File", "location": s})
 
-    def capture_default(obj):
+    def visit_default(obj):
         remove = [False]
-        def add_default(f):
+        def ensure_default_location(f):
             if "location" not in f and "path" in f:
                 f["location"] = f["path"]
                 del f["path"]
             if "location" in f and not arvrunner.fs_access.exists(f["location"]):
-                # Remove from sc
+                # Doesn't exist, remove from list of dependencies to upload
                 sc[:] = [x for x in sc if x["location"] != f["location"]]
                 # Delete "default" from workflowobj
                 remove[0] = True
-        visit_class(obj["default"], ("File", "Directory"), add_default)
+        visit_class(obj["default"], ("File", "Directory"), ensure_default_location)
         if remove[0]:
             del obj["default"]
 
-    find_defaults(workflowobj, capture_default)
+    find_defaults(workflowobj, visit_default)
+
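+    # Expand secondaryFiles patterns declared on tool/workflow inputs against
+    # their "default" values, so those secondary files are uploaded too.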
+    discovered = {}
+    def discover_default_secondary_files(obj):
+        discover_secondary_files(obj["inputs"],
+                                 {shortname(t["id"]): t["default"] for t in obj["inputs"] if "default" in t},
+                                 discovered)
+
+    visit_class(workflowobj, ("CommandLineTool", "Workflow"), discover_default_secondary_files)
+
+    for d in discovered:
+        sc.extend(discovered[d])
 
     mapper = ArvPathMapper(arvrunner, sc, "",
                            "keep:%s",
@@ -143,8 +180,13 @@ def upload_dependencies(arvrunner, name, document_loader,
     def setloc(p):
         if "location" in p and (not p["location"].startswith("_:")) and (not p["location"].startswith("keep:")):
             p["location"] = mapper.mapper(p["location"]).resolved
-    adjustFileObjs(workflowobj, setloc)
-    adjustDirObjs(workflowobj, setloc)
+
+    visit_class(workflowobj, ("File", "Directory"), setloc)
+    visit_class(discovered, ("File", "Directory"), setloc)
+
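+    # Report the discovered secondaryFiles to the caller, keyed by the
+    # resolved keep: location of each primary file.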
+    if discovered_secondaryfiles is not None:
+        for d in discovered:
+            discovered_secondaryfiles[mapper.mapper(d).resolved] = discovered[d]
 
     if "$schemas" in workflowobj:
         sch = []
@@ -172,6 +214,7 @@ def upload_docker(arvrunner, tool):
         for s in tool.steps:
             upload_docker(arvrunner, s.embedded_tool)
 
+
 def packed_workflow(arvrunner, tool, merged_map):
     """Create a packed workflow.
 
@@ -181,16 +224,18 @@ def packed_workflow(arvrunner, tool, merged_map):
     packed = pack(tool.doc_loader, tool.doc_loader.fetch(tool.tool["id"]),
                   tool.tool["id"], tool.metadata, rewrite_out=rewrites)
 
-    rewrite_to_orig = {}
-    for k,v in rewrites.items():
-        rewrite_to_orig[v] = k
+    rewrite_to_orig = {v: k for k,v in rewrites.items()}
 
     def visit(v, cur_id):
         if isinstance(v, dict):
             if v.get("class") in ("CommandLineTool", "Workflow"):
+                if "id" not in v:
+                    raise SourceLine(v, None, Exception).makeError("Embedded process object is missing required 'id' field")
                 cur_id = rewrite_to_orig.get(v["id"], v["id"])
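+            # Rewrite local locations to the keep: references recorded in
+            # merged_map, and attach any secondaryFiles discovered for them.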
             if "location" in v and not v["location"].startswith("keep:"):
-                v["location"] = merged_map[cur_id][v["location"]]
+                v["location"] = merged_map[cur_id].resolved[v["location"]]
+            if "location" in v and v["location"] in merged_map[cur_id].secondaryFiles:
+                v["secondaryFiles"] = merged_map[cur_id].secondaryFiles[v["location"]]
             for l in v:
                 visit(v[l], cur_id)
         if isinstance(v, list):
@@ -199,6 +244,7 @@ def packed_workflow(arvrunner, tool, merged_map):
     visit(packed, None)
     return packed
 
+
 def tag_git_version(packed):
     if tool.tool["id"].startswith("file://"):
         path = os.path.dirname(tool.tool["id"][7:])
@@ -210,20 +256,6 @@ def tag_git_version(packed):
             packed["http://schema.org/version"] = githash
 
 
-def discover_secondary_files(inputs, job_order):
-    for t in inputs:
-        def setSecondary(fileobj):
-            if isinstance(fileobj, dict) and fileobj.get("class") == "File":
-                if "secondaryFiles" not in fileobj:
-                    fileobj["secondaryFiles"] = [{"location": substitute(fileobj["location"], sf), "class": "File"} for sf in t["secondaryFiles"]]
-
-            if isinstance(fileobj, list):
-                for e in fileobj:
-                    setSecondary(e)
-
-        if shortname(t["id"]) in job_order and t.get("secondaryFiles"):
-            setSecondary(job_order[shortname(t["id"])])
-
 def upload_job_order(arvrunner, name, tool, job_order):
     """Upload local files referenced in the input object and return updated input
     object with 'location' updated to the proper keep references.
@@ -248,6 +280,8 @@ def upload_job_order(arvrunner, name, tool, job_order):
 
     return job_order
 
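+# Pairs each tool's mapping of local paths to resolved keep: references with
+# the secondaryFiles discovered while uploading its dependencies.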
+FileUpdates = namedtuple("FileUpdates", ["resolved", "secondaryFiles"])
+
 def upload_workflow_deps(arvrunner, tool):
     # Ensure that Docker images needed by this workflow are available
 
@@ -259,18 +293,20 @@ def upload_workflow_deps(arvrunner, tool):
 
     def upload_tool_deps(deptool):
         if "id" in deptool:
+            discovered_secondaryfiles = {}
             pm = upload_dependencies(arvrunner,
-                                "%s dependencies" % (shortname(deptool["id"])),
-                                document_loader,
-                                deptool,
-                                deptool["id"],
-                                False,
-                                include_primary=False)
+                                     "%s dependencies" % (shortname(deptool["id"])),
+                                     document_loader,
+                                     deptool,
+                                     deptool["id"],
+                                     False,
+                                     include_primary=False,
+                                     discovered_secondaryfiles=discovered_secondaryfiles)
             document_loader.idx[deptool["id"]] = deptool
             toolmap = {}
             for k,v in pm.items():
                 toolmap[k] = v.resolved
-            merged_map[deptool["id"]] = toolmap
+            merged_map[deptool["id"]] = FileUpdates(toolmap, discovered_secondaryfiles)
 
     tool.visit(upload_tool_deps)
 
@@ -317,7 +353,8 @@ class Runner(object):
     def __init__(self, runner, tool, job_order, enable_reuse,
                  output_name, output_tags, submit_runner_ram=0,
                  name=None, on_error=None, submit_runner_image=None,
-                 intermediate_output_ttl=0, merged_map=None):
+                 intermediate_output_ttl=0, merged_map=None, default_storage_classes="default",
+                 priority=None, secret_store=None):
         self.arvrunner = runner
         self.tool = tool
         self.job_order = job_order
@@ -337,6 +374,9 @@ class Runner(object):
         self.on_error = on_error
         self.jobs_image = submit_runner_image or "arvados/jobs:"+__version__
         self.intermediate_output_ttl = intermediate_output_ttl
+        self.priority = priority
+        self.secret_store = secret_store
+        self.default_storage_classes = default_storage_classes
 
         if submit_runner_ram:
             self.submit_runner_ram = submit_runner_ram
@@ -397,6 +437,3 @@ class Runner(object):
             self.arvrunner.output_callback({}, "permanentFail")
         else:
             self.arvrunner.output_callback(outputs, processStatus)
-        finally:
-            if record["uuid"] in self.arvrunner.processes:
-                del self.arvrunner.processes[record["uuid"]]
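
For reference, a minimal standalone sketch of what the relocated
discover_secondary_files/setSecondary pair computes for one job order entry.
The parameter id, file paths, and the local substitute() below are
illustrative; substitute() only approximates cwltool's pattern expansion,
where a leading "^" strips one filename extension before appending:

    import os

    def substitute(value, replace):
        # Approximation of cwltool's substitute(); "^" strips an extension.
        while replace.startswith("^"):
            value = os.path.splitext(value)[0]
            replace = replace[1:]
        return value + replace

    # Illustrative input parameter schema and job order value:
    t = {"id": "#main/ref", "secondaryFiles": [".fai", "^.dict"]}
    ref = {"class": "File", "location": "file:///tmp/ref.fa"}
    discovered = {}

    # setSecondary(t, ref, discovered) fills in the missing secondaryFiles:
    ref["secondaryFiles"] = [{"class": "File",
                              "location": substitute(ref["location"], sf)}
                             for sf in t["secondaryFiles"]]
    discovered[ref["location"]] = ref["secondaryFiles"]

    print(ref["secondaryFiles"])
    # [{'class': 'File', 'location': 'file:///tmp/ref.fa.fai'},
    #  {'class': 'File', 'location': 'file:///tmp/ref.dict'}]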