# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
-from past.builtins import basestring
-from future.utils import viewitems
-
import os
import json
import copy
import logging
import urllib
-from io import StringIO
import sys
+import re
+from io import StringIO
from typing import (MutableSequence, MutableMapping)
from ruamel.yaml import YAML
from cwltool.pack import pack
from cwltool.load_tool import fetch_document, resolve_and_validate_document
-from cwltool.process import shortname
+from cwltool.process import shortname, uniquename
from cwltool.workflow import Workflow, WorkflowException, WorkflowStep
from cwltool.utils import adjustFileObjs, adjustDirObjs, visit_class, normalizeFilesDirs
-from cwltool.context import LoadingContext
+from cwltool.context import LoadingContext, getdefault
from schema_salad.ref_resolver import file_uri, uri_file_path
from .runner import (upload_dependencies, packed_workflow, upload_workflow_collection,
trim_anonymous_location, remove_redundant_fields, discover_secondary_files,
- make_builder, arvados_jobs_image)
+ make_builder, arvados_jobs_image, FileUpdates)
+from .arvcontainer import RunnerContainer
from .pathmapper import ArvPathMapper, trim_listing
from .arvtool import ArvadosCommandTool, set_cluster_target
from ._version import __version__
+from .util import common_prefix
+from .arvdocker import arv_docker_get_image
from .perf import Perf
max_res_pars = ("coresMin", "coresMax", "ramMin", "ramMax", "tmpdirMin", "tmpdirMax")
sum_res_pars = ("outdirMin", "outdirMax")
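+# Matches the name of a built-in CWL base type, optionally as an
+# array type ("[]" suffix) and/or an optional type ("?" suffix).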
+_basetype_re = re.compile(r'''(?:
+Directory
+|File
+|array
+|boolean
+|double
+|enum
+|float
+|int
+|long
+|null
+|record
+|string
+)(?:\[\])?\??''', re.VERBOSE)
+
def make_wrapper_workflow(arvRunner, main, packed, project_uuid, name, git_info, tool):
col = arvados.collection.Collection(api_client=arvRunner.api,
keep_client=arvRunner.keep_client)
return json.dumps(doc, sort_keys=True, indent=4, separators=(',',': '))
-def rel_ref(s, baseuri, urlexpander, merged_map):
+
+def rel_ref(s, baseuri, urlexpander, merged_map, jobmapper):
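+ # Express reference "s" relative to the file containing it, or to
+ # its previously uploaded location; "keep:" and "arvwf:"
+ # references are returned unchanged.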
+ if s.startswith("keep:") or s.startswith("arvwf:"):
+ return s
+
uri = urlexpander(s, baseuri)
+
+ if uri.startswith("keep:"):
+ return uri
+
fileuri = urllib.parse.urldefrag(baseuri)[0]
- if fileuri in merged_map:
- replacements = merged_map[fileuri].resolved
- if uri in replacements:
- return replacements[uri]
- if s.startswith("keep:"):
- return s
+ for u in (baseuri, fileuri):
+ if u in merged_map:
+ replacements = merged_map[u].resolved
+ if uri in replacements:
+ return replacements[uri]
+
+ if uri in jobmapper:
+ return jobmapper.mapper(uri).target
- p1 = os.path.dirname(uri_file_path(baseuri))
+ p1 = os.path.dirname(uri_file_path(fileuri))
p2 = os.path.dirname(uri_file_path(uri))
p3 = os.path.basename(uri_file_path(uri))
+
r = os.path.relpath(p2, p1)
if r == ".":
r = ""
return os.path.join(r, p3)
-def update_refs(d, baseuri, urlexpander, merged_map, set_block_style, runtimeContext):
- if set_block_style and (isinstance(d, CommentedSeq) or isinstance(d, CommentedMap)):
- d.fa.set_block_style()
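+# Return True if "tp" names a plain CWL base type rather than a
+# reference to a user-defined type, for example:
+#
+#   is_basetype("File[]?")              -> True
+#   is_basetype("#my_types.yml/sample") -> False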
+def is_basetype(tp):
+ return _basetype_re.match(tp) is not None
+def update_refs(api, d, baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix):
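+ # Recursively rewrite references in "d" to be relative to "baseuri".
+ # When "prefix" is non-empty, string values starting with it are
+ # rewritten to start with "replacePrefix" instead (used when
+ # building the wrapper workflow to remap ids into "#main/").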
if isinstance(d, MutableSequence):
- for s in d:
- update_refs(s, baseuri, urlexpander, merged_map, set_block_style, runtimeContext)
+ for i, s in enumerate(d):
+ if prefix and isinstance(s, str):
+ if s.startswith(prefix):
+ d[i] = replacePrefix+s[len(prefix):]
+ else:
+ update_refs(api, s, baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix)
elif isinstance(d, MutableMapping):
+ for field in ("id", "name"):
+ if isinstance(d.get(field), str) and d[field].startswith("_:"):
+ # Blank node reference; these are added automatically and can be removed.
+ del d[field]
+
if "id" in d:
baseuri = urlexpander(d["id"], baseuri, scoped_id=True)
+ elif "name" in d and isinstance(d["name"], str):
+ baseuri = urlexpander(d["name"], baseuri, scoped_id=True)
if d.get("class") == "DockerRequirement":
- dockerImageId = d.get("dockerImageId") or d.get("dockerPull")
- d["http://arvados.org/cwl#dockerCollectionPDH"] = runtimeContext.cached_docker_lookups.get(dockerImageId)
+ d["http://arvados.org/cwl#dockerCollectionPDH"] = arv_docker_get_image(api, d, False,
+ runtimeContext)
+
+ for field in d:
+ if field in ("location", "run", "name") and isinstance(d[field], str):
+ d[field] = rel_ref(d[field], baseuri, urlexpander, merged_map, jobmapper)
+ continue
+
+ if field in ("$include", "$import") and isinstance(d[field], str):
+ d[field] = rel_ref(d[field], baseuri, urlexpander, {}, jobmapper)
+ continue
+
+ for t in ("type", "items"):
+ if (field == t and
+ isinstance(d[t], str) and
+ not is_basetype(d[t])):
+ d[t] = rel_ref(d[t], baseuri, urlexpander, merged_map, jobmapper)
+ continue
+
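+ # In the map form of "inputs", a value may be a bare type name;
+ # rewrite values that refer to user-defined (non-base) types.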
+ if field == "inputs" and isinstance(d["inputs"], MutableMapping):
+ for inp in d["inputs"]:
+ if isinstance(d["inputs"][inp], str) and not is_basetype(d["inputs"][inp]):
+ d["inputs"][inp] = rel_ref(d["inputs"][inp], baseuri, urlexpander, merged_map, jobmapper)
+ if isinstance(d["inputs"][inp], MutableMapping):
+ update_refs(api, d["inputs"][inp], baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix)
+ continue
+
+ if field in ("requirements", "hints") and isinstance(d[field], MutableMapping):
+ dr = d[field].get("DockerRequirement")
+ if dr:
+ dr["http://arvados.org/cwl#dockerCollectionPDH"] = arv_docker_get_image(api, dr, False,
+ runtimeContext)
+
+ if field == "$schemas":
+ for n, s in enumerate(d["$schemas"]):
+ d["$schemas"][n] = rel_ref(d["$schemas"][n], baseuri, urlexpander, merged_map, jobmapper)
+ continue
- for s in d:
- for field in ("$include", "$import", "location", "run"):
- if field in d and isinstance(d[field], str):
- d[field] = rel_ref(d[field], baseuri, urlexpander, merged_map)
- if "$schemas" in d:
- for n, s in enumerate(d["$schemas"]):
- d["$schemas"][n] = rel_ref(d["$schemas"][n], baseuri, urlexpander, merged_map)
- update_refs(d[s], baseuri, urlexpander, merged_map, set_block_style, runtimeContext)
+ update_refs(api, d[field], baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix)
+def fix_schemadef(req, baseuri, urlexpander, merged_map, jobmapper, pdh):
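+ # Rewrite SchemaDefRequirement type names to point into the Keep
+ # collection identified by "pdh", and record each rename in
+ # merged_map so later update_refs() calls rewrite references to
+ # these types as well.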
+ req = copy.deepcopy(req)
+
+ for f in req["types"]:
+ r = f["name"]
+ path, frag = urllib.parse.urldefrag(r)
+ rel = rel_ref(r, baseuri, urlexpander, merged_map, jobmapper)
+ merged_map.setdefault(path, FileUpdates({}, {}))
+ rename = "keep:%s/%s" %(pdh, rel)
+ for mm in merged_map:
+ merged_map[mm].resolved[r] = rename
+ return req
-def new_upload_workflow(arvRunner, tool, job_order, project_uuid,
+
+def drop_ids(d):
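+ # Recursively strip "id" fields holding local "file:" URIs, which
+ # are leftovers from loading and shouldn't appear in the wrapper.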
+ if isinstance(d, MutableSequence):
+ for i, s in enumerate(d):
+ drop_ids(s)
+ elif isinstance(d, MutableMapping):
+ if "id" in d and d["id"].startswith("file:"):
+ del d["id"]
+
+ for field in d:
+ drop_ids(d[field])
+
+
+def upload_workflow(arvRunner, tool, job_order, project_uuid,
runtimeContext,
uuid=None,
submit_runner_ram=0, name=None, merged_map=None,
submit_runner_image=None,
git_info=None,
- set_defaults=False):
+ set_defaults=False,
+ jobmapper=None):
firstfile = None
workflow_files = set()
import_files = set()
include_files = set()
+ # The document loader index will have entries for all the files
+ # that were loaded in the process of parsing the entire workflow
+ # (including subworkflows, tools, imports, etc). We use this to
+ # compose a list of the workflow's file dependencies.
for w in tool.doc_loader.idx:
if w.startswith("file://"):
workflow_files.add(urllib.parse.urldefrag(w)[0])
all_files = workflow_files | import_files | include_files
- n = 7
- allmatch = True
- while allmatch:
- n += 1
- for f in all_files:
- if len(f)-1 < n:
- n -= 1
- allmatch = False
- break
- if f[n] != firstfile[n]:
- allmatch = False
- break
-
- while firstfile[n] != "/":
- n -= 1
- prefix = firstfile[:n+1]
+ # Find the longest common prefix among all the file names. We'll
+ # use this to recreate the directory structure in a Keep
+ # collection with correct relative references.
+ prefix = common_prefix(firstfile, all_files) if firstfile else ""
col = arvados.collection.Collection(api_client=arvRunner.api)
+ # Now go through all the files and update references to other
+ # files. We previously scanned for file dependencies, which are
+ # passed in as merged_map.
+ #
+ # A note about merged_map: we upload the dependencies of each
+ # process object (CommandLineTool/Workflow) to a separate
+ # collection. That way, when the user edits something, collection
+ # PDH changes are limited to just that tool, which minimizes
+ # situations where a small change breaks container reuse for the
+ # whole workflow.
+ #
for w in workflow_files | import_files:
- # 1. load YAML
+ # 1. load the YAML file
text = tool.doc_loader.fetch_text(w)
if isinstance(text, bytes):
yamlloader = schema_salad.utils.yaml_no_ts()
result = yamlloader.load(textIO)
- set_block_style = False
- if result.fa.flow_style():
- set_block_style = True
+ # If the whole document is in "flow style" it is probably JSON
+ # formatted. We'll re-export it as JSON because the
+ # ruamel.yaml round-trip mode is a lie and only preserves
+ # "block style" formatting and not "flow style" formatting.
+ export_as_json = result.fa.flow_style()
# 2. find $import, $include, $schemas, run, location
# 3. update field value
- update_refs(result, w, tool.doc_loader.expand_url, merged_map, set_block_style, runtimeContext)
+ update_refs(arvRunner.api, result, w, tool.doc_loader.expand_url, merged_map, jobmapper, runtimeContext, "", "")
- with col.open(w[n+1:], "wt") as f:
- #print(yamlloader.dump(result, stream=sys.stdout))
- yamlloader.dump(result, stream=f)
+
+ # Write the updated file to the collection.
+ with col.open(w[len(prefix):], "wt") as f:
+ if export_as_json:
+ json.dump(result, f, indent=4, separators=(',',': '))
+ else:
+ yamlloader.dump(result, stream=f)
+
+ # Also store a verbatim copy of the original files.
+ with col.open(os.path.join("original", w[len(prefix):]), "wt") as f:
+ f.write(text)
+
+ # Upload files referenced by $include directives; these are used
+ # unchanged and don't need to be updated.
for w in include_files:
- with col.open(w[n+1:], "wb") as f1:
- with open(uri_file_path(w), "rb") as f2:
- dat = f2.read(65536)
- while dat:
- f1.write(dat)
- dat = f2.read(65536)
+ with col.open(w[len(prefix):], "wb") as f1:
+ with col.open(os.path.join("original", w[len(prefix):]), "wb") as f3:
+ with open(uri_file_path(w), "rb") as f2:
+ dat = f2.read(65536)
+ while dat:
+ f1.write(dat)
+ f3.write(dat)
+ dat = f2.read(65536)
+
+ # Now collect metadata: the collection name and git properties.
toolname = tool.tool.get("label") or tool.metadata.get("label") or os.path.basename(tool.tool["id"])
if git_info and git_info.get("http://arvados.org/cwl#gitDescribe"):
toolname = "%s (%s)" % (toolname, git_info.get("http://arvados.org/cwl#gitDescribe"))
- toolfile = tool.tool["id"][n+1:]
+ toolfile = tool.tool["id"][len(prefix):]
properties = {
"type": "workflow",
p = g.split("#", 1)[1]
properties["arv:"+p] = git_info[g]
- col.save_new(name=toolname, owner_uuid=arvRunner.project_uuid, ensure_unique_name=True, properties=properties)
+ # Check if a collection with the same content already exists in the target project. If so, just use that one.
+ existing = arvRunner.api.collections().list(filters=[["portable_data_hash", "=", col.portable_data_hash()],
+ ["owner_uuid", "=", arvRunner.project_uuid]]).execute(num_retries=arvRunner.num_retries)
- adjustDirObjs(job_order, trim_listing)
- adjustFileObjs(job_order, trim_anonymous_location)
- adjustDirObjs(job_order, trim_anonymous_location)
+ if len(existing["items"]) == 0:
+ toolname = toolname.replace("/", " ")
+ col.save_new(name=toolname, owner_uuid=arvRunner.project_uuid, ensure_unique_name=True, properties=properties)
+ logger.info("Workflow uploaded to %s", col.manifest_locator())
+ else:
+ logger.info("Workflow uploaded to %s", existing["items"][0]["uuid"])
- # now construct the wrapper
+ # Now that we've updated the workflow and saved it to a
+ # collection, we're going to construct a minimal "wrapper"
+ # workflow which consists only of input and output parameters
+ # connected to a single step that runs the real workflow.
+
+ runfile = "keep:%s/%s" % (col.portable_data_hash(), toolfile)
step = {
"id": "#main/" + toolname,
"in": [],
"out": [],
- "run": "keep:%s/%s" % (col.portable_data_hash(), toolfile),
+ "run": runfile,
"label": name
}
wf_runner_resources = {"class": "http://arvados.org/cwl#WorkflowRunnerResources"}
hints.append(wf_runner_resources)
- # uncomment me
- wf_runner_resources["acrContainerImage"] = arvados_jobs_image(arvRunner,
- submit_runner_image or "arvados/jobs:"+__version__,
- runtimeContext)
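+ # Only look up the default arvados/jobs image if the hint doesn't
+ # already specify an acrContainerImage.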
+ if "acrContainerImage" not in wf_runner_resources:
+ wf_runner_resources["acrContainerImage"] = arvados_jobs_image(arvRunner,
+ submit_runner_image or "arvados/jobs:"+__version__,
+ runtimeContext)
if submit_runner_ram:
wf_runner_resources["ramMin"] = submit_runner_ram
+ # Remove a few redundant fields from the "job order" (aka the
+ # input object or input parameters). When we're creating or
+ # updating a workflow record, any values in the job order get
+ # copied over as default values for input parameters.
+ adjustDirObjs(job_order, trim_listing)
+ adjustFileObjs(job_order, trim_anonymous_location)
+ adjustDirObjs(job_order, trim_anonymous_location)
+
newinputs = []
for i in main["inputs"]:
inp = {}
if hints:
wrapper["hints"] = hints
+ # Schema definitions (which let you define things like record
+ # types) require special handling.
+
+ for i, r in enumerate(wrapper["requirements"]):
+ if r["class"] == "SchemaDefRequirement":
+ wrapper["requirements"][i] = fix_schemadef(r, main["id"], tool.doc_loader.expand_url, merged_map, jobmapper, col.portable_data_hash())
+
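+ # Rewrite references in the wrapper itself; ids of the form
+ # "<main id>#step" are remapped into the "#main/" namespace so
+ # they stay valid inside the wrapper document.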
+ update_refs(arvRunner.api, wrapper, main["id"], tool.doc_loader.expand_url, merged_map, jobmapper, runtimeContext, main["id"]+"#", "#main/")
+
doc = {"cwlVersion": "v1.2", "$graph": [wrapper]}
if git_info:
for g in git_info:
doc[g] = git_info[g]
- update_refs(wrapper, main["id"], tool.doc_loader.expand_url, merged_map, False, runtimeContext)
+ # Remove any lingering local "file:" references.
+ drop_ids(wrapper)
return doc
return call.execute(num_retries=arvRunner.num_retries)["uuid"]
-def upload_workflow(arvRunner, tool, job_order, project_uuid,
- runtimeContext, uuid=None,
- submit_runner_ram=0, name=None, merged_map=None,
- submit_runner_image=None,
- git_info=None):
-
- packed = packed_workflow(arvRunner, tool, merged_map, runtimeContext, git_info)
-
- adjustDirObjs(job_order, trim_listing)
- adjustFileObjs(job_order, trim_anonymous_location)
- adjustDirObjs(job_order, trim_anonymous_location)
-
- main = [p for p in packed["$graph"] if p["id"] == "#main"][0]
- for inp in main["inputs"]:
- sn = shortname(inp["id"])
- if sn in job_order:
- inp["default"] = job_order[sn]
-
- if not name:
- name = tool.tool.get("label", os.path.basename(tool.tool["id"]))
-
- upload_dependencies(arvRunner, name, tool.doc_loader,
- packed, tool.tool["id"],
- runtimeContext)
-
- wf_runner_resources = None
-
- hints = main.get("hints", [])
- found = False
- for h in hints:
- if h["class"] == "http://arvados.org/cwl#WorkflowRunnerResources":
- wf_runner_resources = h
- found = True
- break
- if not found:
- wf_runner_resources = {"class": "http://arvados.org/cwl#WorkflowRunnerResources"}
- hints.append(wf_runner_resources)
-
- wf_runner_resources["acrContainerImage"] = arvados_jobs_image(arvRunner,
- submit_runner_image or "arvados/jobs:"+__version__,
- runtimeContext)
-
- if submit_runner_ram:
- wf_runner_resources["ramMin"] = submit_runner_ram
-
- main["hints"] = hints
-
- wrapper = make_wrapper_workflow(arvRunner, main, packed, project_uuid, name, git_info, tool)
-
- body = {
- "workflow": {
- "name": name,
- "description": tool.tool.get("doc", ""),
- "definition": wrapper
- }}
- if project_uuid:
- body["workflow"]["owner_uuid"] = project_uuid
-
- if uuid:
- call = arvRunner.api.workflows().update(uuid=uuid, body=body)
- else:
- call = arvRunner.api.workflows().create(body=body)
- return call.execute(num_retries=arvRunner.num_retries)["uuid"]
-
def dedup_reqs(reqs):
dedup = {}
for r in reversed(reqs):
runtimeContext = runtimeContext.copy()
runtimeContext.toplevel = True # Preserve behavior for #13365
- builder = make_builder({shortname(k): v for k,v in viewitems(joborder)}, self.hints, self.requirements,
+ builder = make_builder({shortname(k): v for k, v in joborder.items()}, self.hints, self.requirements,
runtimeContext, self.metadata)
runtimeContext = set_cluster_target(self.tool, self.arvrunner, builder, runtimeContext)
return super(ArvadosWorkflowStep, self).job(joborder, output_callback, runtimeContext)
self.dynamic_resource_req = []
self.static_resource_req = []
self.wf_reffiles = []
- self.loadingContext = loadingContext
- super(ArvadosWorkflow, self).__init__(toolpath_object, loadingContext)
- self.cluster_target_req, _ = self.get_requirement("http://arvados.org/cwl#ClusterTarget")
+ self.loadingContext = loadingContext.copy()
+ self.requirements = copy.deepcopy(getdefault(loadingContext.requirements, []))
+ tool_requirements = toolpath_object.get("requirements", [])
+ self.hints = copy.deepcopy(getdefault(loadingContext.hints, []))
+ tool_hints = toolpath_object.get("hints", [])
+ workflow_runner_req, _ = self.get_requirement("http://arvados.org/cwl#WorkflowRunnerResources")
+ if workflow_runner_req and workflow_runner_req.get("acrContainerImage"):
+ self.loadingContext.default_docker_image = workflow_runner_req.get("acrContainerImage")
+ super(ArvadosWorkflow, self).__init__(toolpath_object, self.loadingContext)
+ self.cluster_target_req, _ = self.get_requirement("http://arvados.org/cwl#ClusterTarget")
- def job(self, joborder, output_callback, runtimeContext):
- builder = make_builder(joborder, self.hints, self.requirements, runtimeContext, self.metadata)
- runtimeContext = set_cluster_target(self.tool, self.arvrunner, builder, runtimeContext)
- req, _ = self.get_requirement("http://arvados.org/cwl#RunInSingleContainer")
- if not req:
- return super(ArvadosWorkflow, self).job(joborder, output_callback, runtimeContext)
- # RunInSingleContainer is true
+ def runInSingleContainer(self, joborder, output_callback, runtimeContext, builder):
with SourceLine(self.tool, None, WorkflowException, logger.isEnabledFor(logging.DEBUG)):
if "id" not in self.tool:
raise WorkflowException("%s object must have 'id'" % (self.tool["class"]))
dyn = False
for k in max_res_pars + sum_res_pars:
if k in req:
- if isinstance(req[k], basestring):
+ if isinstance(req[k], str):
if item["id"] == "#main":
# only the top-level requirements/hints may contain expressions
self.dynamic_resource_req.append(req)
})
return ArvadosCommandTool(self.arvrunner, wf_runner, self.loadingContext).job(joborder_resolved, output_callback, runtimeContext)
+
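+ # Handle http://arvados.org/cwl#SeparateRunner: submit this
+ # (sub)workflow to run in its own workflow runner container
+ # instead of expanding its steps into the current run.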
+ def separateRunner(self, joborder, output_callback, runtimeContext, req, builder):
+
+ name = runtimeContext.name
+
+ rpn = req.get("runnerProcessName")
+ if rpn:
+ name = builder.do_eval(rpn)
+
+ return RunnerContainer(self.arvrunner,
+ self,
+ self.loadingContext,
+ runtimeContext.enable_reuse,
+ None,
+ None,
+ submit_runner_ram=runtimeContext.submit_runner_ram,
+ name=name,
+ on_error=runtimeContext.on_error,
+ submit_runner_image=runtimeContext.submit_runner_image,
+ intermediate_output_ttl=runtimeContext.intermediate_output_ttl,
+ merged_map=None,
+ priority=runtimeContext.priority,
+ secret_store=self.arvrunner.secret_store,
+ collection_cache_size=runtimeContext.collection_cache_size,
+ collection_cache_is_default=self.arvrunner.should_estimate_cache_size,
+ git_info=runtimeContext.git_info,
+ reuse_runner=True).job(joborder, output_callback, runtimeContext)
+
+
+ def job(self, joborder, output_callback, runtimeContext):
+
+ builder = make_builder(joborder, self.hints, self.requirements, runtimeContext, self.metadata)
+ runtimeContext = set_cluster_target(self.tool, self.arvrunner, builder, runtimeContext)
+
+ req, _ = self.get_requirement("http://arvados.org/cwl#RunInSingleContainer")
+ if req:
+ return self.runInSingleContainer(joborder, output_callback, runtimeContext, builder)
+
+ req, _ = self.get_requirement("http://arvados.org/cwl#SeparateRunner")
+ if req:
+ return self.separateRunner(joborder, output_callback, runtimeContext, req, builder)
+
+ return super(ArvadosWorkflow, self).job(joborder, output_callback, runtimeContext)
+
+
def make_workflow_step(self,
toolpath_object, # type: Dict[Text, Any]
pos, # type: int