from cwltool.process import shortname, uniquename
from cwltool.workflow import Workflow, WorkflowException, WorkflowStep
from cwltool.utils import adjustFileObjs, adjustDirObjs, visit_class, normalizeFilesDirs
-from cwltool.context import LoadingContext
+from cwltool.context import LoadingContext, getdefault
from schema_salad.ref_resolver import file_uri, uri_file_path
from .pathmapper import ArvPathMapper, trim_listing
from .arvtool import ArvadosCommandTool, set_cluster_target
from ._version import __version__
+from .util import common_prefix
+from .arvdocker import arv_docker_get_image
from .perf import Perf
max_res_pars = ("coresMin", "coresMax", "ramMin", "ramMax", "tmpdirMin", "tmpdirMax")
sum_res_pars = ("outdirMin", "outdirMax")
+_basetype_re = re.compile(r'''(?:
+Directory
+|File
+|array
+|boolean
+|double
+|enum
+|float
+|int
+|long
+|null
+|record
+|string
+)(?:\[\])?\??''', re.VERBOSE)
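+# Illustrative (non-exhaustive) examples of shorthand this pattern
+# accepts: "File", "File[]" (an array of File), "int?" (optional int),
+# and "string[]?" (an optional array of string).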
+
def make_wrapper_workflow(arvRunner, main, packed, project_uuid, name, git_info, tool):
col = arvados.collection.Collection(api_client=arvRunner.api,
keep_client=arvRunner.keep_client)
if s.startswith("keep:"):
return s
- #print("BBB", s, baseuri)
uri = urlexpander(s, baseuri)
- #print("CCC", uri)
if uri.startswith("keep:"):
return uri
fileuri = urllib.parse.urldefrag(baseuri)[0]
- #print("BBB", s, baseuri, uri)
-
for u in (baseuri, fileuri):
if u in merged_map:
replacements = merged_map[u].resolved
- #print("RRR", u, uri, replacements)
if uri in replacements:
return replacements[uri]
p2 = os.path.dirname(uri_file_path(uri))
p3 = os.path.basename(uri_file_path(uri))
- #print("PPP", p1, p2, p3)
-
r = os.path.relpath(p2, p1)
if r == ".":
r = ""
- #print("RRR", r)
-
return os.path.join(r, p3)
def is_basetype(tp):
- basetypes = ("null", "boolean", "int", "long", "float", "double", "string", "File", "Directory", "record", "array", "enum")
- for b in basetypes:
- if re.match(b+"(\[\])?\??", tp):
- return True
- return False
-
-
-def update_refs(d, baseuri, urlexpander, merged_map, jobmapper, set_block_style, runtimeContext, prefix, replacePrefix):
- if set_block_style and (isinstance(d, CommentedSeq) or isinstance(d, CommentedMap)):
- d.fa.set_block_style()
+ return _basetype_re.match(tp) is not None
+def update_refs(api, d, baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix):
if isinstance(d, MutableSequence):
for i, s in enumerate(d):
if prefix and isinstance(s, str):
if s.startswith(prefix):
d[i] = replacePrefix+s[len(prefix):]
else:
- update_refs(s, baseuri, urlexpander, merged_map, jobmapper, set_block_style, runtimeContext, prefix, replacePrefix)
+ update_refs(api, s, baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix)
elif isinstance(d, MutableMapping):
for field in ("id", "name"):
if isinstance(d.get(field), str) and d[field].startswith("_:"):
baseuri = urlexpander(d["name"], baseuri, scoped_id=True)
if d.get("class") == "DockerRequirement":
- dockerImageId = d.get("dockerImageId") or d.get("dockerPull")
- d["http://arvados.org/cwl#dockerCollectionPDH"] = runtimeContext.cached_docker_lookups.get(dockerImageId)
+ d["http://arvados.org/cwl#dockerCollectionPDH"] = arv_docker_get_image(api, d, False,
+ runtimeContext)
for field in d:
if field in ("location", "run", "name") and isinstance(d[field], str):
if isinstance(d["inputs"][inp], str) and not is_basetype(d["inputs"][inp]):
d["inputs"][inp] = rel_ref(d["inputs"][inp], baseuri, urlexpander, merged_map, jobmapper)
if isinstance(d["inputs"][inp], MutableMapping):
- update_refs(d["inputs"][inp], baseuri, urlexpander, merged_map, jobmapper, set_block_style, runtimeContext, prefix, replacePrefix)
+ update_refs(api, d["inputs"][inp], baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix)
continue
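+        # requirements/hints may also appear in map form (keyed by
+        # class name); resolve DockerRequirement container images
+        # found there as well.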
+ if field in ("requirements", "hints") and isinstance(d[field], MutableMapping):
+ dr = d[field].get("DockerRequirement")
+ if dr:
+ dr["http://arvados.org/cwl#dockerCollectionPDH"] = arv_docker_get_image(api, dr, False,
+ runtimeContext)
+
if field == "$schemas":
for n, s in enumerate(d["$schemas"]):
d["$schemas"][n] = rel_ref(d["$schemas"][n], baseuri, urlexpander, merged_map, jobmapper)
continue
- update_refs(d[field], baseuri, urlexpander, merged_map, jobmapper, set_block_style, runtimeContext, prefix, replacePrefix)
+ update_refs(api, d[field], baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix)
def fix_schemadef(req, baseuri, urlexpander, merged_map, jobmapper, pdh):
path, frag = urllib.parse.urldefrag(r)
rel = rel_ref(r, baseuri, urlexpander, merged_map, jobmapper)
merged_map.setdefault(path, FileUpdates({}, {}))
- #print("PPP", path, r, frag)
rename = "keep:%s/%s" %(pdh, rel)
- #rename = "#%s" % frag
for mm in merged_map:
merged_map[mm].resolved[r] = rename
return req
+
def drop_ids(d):
if isinstance(d, MutableSequence):
for i, s in enumerate(d):
drop_ids(d[field])
-def new_upload_workflow(arvRunner, tool, job_order, project_uuid,
+def upload_workflow(arvRunner, tool, job_order, project_uuid,
runtimeContext,
uuid=None,
submit_runner_ram=0, name=None, merged_map=None,
import_files = set()
include_files = set()
+ # The document loader index will have entries for all the files
+ # that were loaded in the process of parsing the entire workflow
+    # (including subworkflows, tools, imports, etc). We use this to
+    # compose a list of the workflow's file dependencies.
for w in tool.doc_loader.idx:
if w.startswith("file://"):
workflow_files.add(urllib.parse.urldefrag(w)[0])
all_files = workflow_files | import_files | include_files
- n = 7
- allmatch = True
- if firstfile:
- while allmatch:
- n += 1
- for f in all_files:
- if len(f)-1 < n:
- n -= 1
- allmatch = False
- break
- if f[n] != firstfile[n]:
- allmatch = False
- break
-
- while firstfile[n] != "/":
- n -= 1
+ # Find the longest common prefix among all the file names. We'll
+    # use this to recreate the directory structure in a Keep
+ # collection with correct relative references.
+ prefix = common_prefix(firstfile, all_files) if firstfile else ""
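+    # (Sketch of the assumed helper behavior, with hypothetical paths:
+    # given "file:///home/me/wf/main.cwl" and
+    # "file:///home/me/wf/tools/a.cwl", the common prefix would be
+    # "file:///home/me/wf/", so stripping it below yields relative
+    # paths like "main.cwl" and "tools/a.cwl".)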
- col = arvados.collection.Collection(api_client=arvRunner.api)
- #print(merged_map)
+ col = arvados.collection.Collection(api_client=arvRunner.api)
+ # Now go through all the files and update references to other
+    # files. We previously scanned for file dependencies; these are
+    # passed in as merged_map.
+    #
+    # Note about merged_map: we upload the dependencies of each process
+    # object (CommandLineTool/Workflow) to a separate collection. That
+    # way, when the user edits something, collection PDH changes are
+    # limited to just that tool, which minimizes situations where small
+    # changes break container reuse for the whole workflow.
+ #
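+    # Rough sketch of the merged_map shape assumed here, with
+    # hypothetical paths; each FileUpdates carries a "resolved"
+    # mapping from original references to uploaded locations:
+    #
+    #   {"file:///home/me/wf/tool.cwl":
+    #       FileUpdates(resolved={"file:///home/me/wf/script.py":
+    #                             "keep:<pdh>/script.py"},
+    #                   secondaryFiles={})}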
for w in workflow_files | import_files:
- # 1. load YAML
+ # 1. load the YAML file
text = tool.doc_loader.fetch_text(w)
if isinstance(text, bytes):
yamlloader = schema_salad.utils.yaml_no_ts()
result = yamlloader.load(textIO)
- set_block_style = False
- if result.fa.flow_style():
- set_block_style = True
+    # If the whole document is in "flow style", it is probably JSON
+    # formatted. We'll re-export it as JSON, because ruamel.yaml's
+    # round-trip mode only preserves "block style" formatting, not
+    # "flow style" formatting.
+ export_as_json = result.fa.flow_style()
# 2. find $import, $include, $schema, run, location
# 3. update field value
- update_refs(result, w, tool.doc_loader.expand_url, merged_map, jobmapper, set_block_style, runtimeContext, "", "")
+ update_refs(arvRunner.api, result, w, tool.doc_loader.expand_url, merged_map, jobmapper, runtimeContext, "", "")
- with col.open(w[n+1:], "wt") as f:
- #print(yamlloader.dump(result, stream=sys.stdout))
- yamlloader.dump(result, stream=f)
+ # Write the updated file to the collection.
+ with col.open(w[len(prefix):], "wt") as f:
+ if export_as_json:
+ json.dump(result, f, indent=4, separators=(',',': '))
+ else:
+ yamlloader.dump(result, stream=f)
- with col.open(os.path.join("original", w[n+1:]), "wt") as f:
+ # Also store a verbatim copy of the original files
+ with col.open(os.path.join("original", w[len(prefix):]), "wt") as f:
f.write(text)
+    # Upload files referenced by $include directives; these are used
+    # unchanged and don't need to be updated.
for w in include_files:
- with col.open(w[n+1:], "wb") as f1:
- with col.open(os.path.join("original", w[n+1:]), "wb") as f3:
+ with col.open(w[len(prefix):], "wb") as f1:
+ with col.open(os.path.join("original", w[len(prefix):]), "wb") as f3:
with open(uri_file_path(w), "rb") as f2:
dat = f2.read(65536)
while dat:
f3.write(dat)
dat = f2.read(65536)
+ # Now collect metadata: the collection name and git properties.
toolname = tool.tool.get("label") or tool.metadata.get("label") or os.path.basename(tool.tool["id"])
if git_info and git_info.get("http://arvados.org/cwl#gitDescribe"):
toolname = "%s (%s)" % (toolname, git_info.get("http://arvados.org/cwl#gitDescribe"))
- toolfile = tool.tool["id"][n+1:]
+ toolfile = tool.tool["id"][len(prefix):]
properties = {
"type": "workflow",
p = g.split("#", 1)[1]
properties["arv:"+p] = git_info[g]
- col.save_new(name=toolname, owner_uuid=arvRunner.project_uuid, ensure_unique_name=True, properties=properties)
-
- logger.info("Workflow uploaded to %s", col.manifest_locator())
+    # Check if a collection with the same content already exists in the
+    # target project. If so, just use that one.
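+    # (The portable data hash is derived from the collection's
+    # contents, so a match means a collection with identical content
+    # already exists in the project.)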
+ existing = arvRunner.api.collections().list(filters=[["portable_data_hash", "=", col.portable_data_hash()],
+ ["owner_uuid", "=", arvRunner.project_uuid]]).execute(num_retries=arvRunner.num_retries)
- adjustDirObjs(job_order, trim_listing)
- adjustFileObjs(job_order, trim_anonymous_location)
- adjustDirObjs(job_order, trim_anonymous_location)
+ if len(existing["items"]) == 0:
+ toolname = toolname.replace("/", " ")
+ col.save_new(name=toolname, owner_uuid=arvRunner.project_uuid, ensure_unique_name=True, properties=properties)
+ logger.info("Workflow uploaded to %s", col.manifest_locator())
+ else:
+ logger.info("Workflow uploaded to %s", existing["items"][0]["uuid"])
- # now construct the wrapper
+ # Now that we've updated the workflow and saved it to a
+ # collection, we're going to construct a minimal "wrapper"
+    # workflow which consists only of input and output parameters
+ # connected to a single step that runs the real workflow.
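+    # Rough sketch of the wrapper produced below ("<pdh>" and
+    # "<toolfile>" stand in for the real values; field order and
+    # details vary):
+    #
+    #   {"class": "Workflow", "id": "#main",
+    #    "inputs": [...], "outputs": [...],
+    #    "steps": [{"run": "keep:<pdh>/<toolfile>", ...}],
+    #    "hints": [...], "requirements": [...]}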
runfile = "keep:%s/%s" % (col.portable_data_hash(), toolfile)
wf_runner_resources = {"class": "http://arvados.org/cwl#WorkflowRunnerResources"}
hints.append(wf_runner_resources)
- wf_runner_resources["acrContainerImage"] = arvados_jobs_image(arvRunner,
- submit_runner_image or "arvados/jobs:"+__version__,
- runtimeContext)
+ if "acrContainerImage" not in wf_runner_resources:
+ wf_runner_resources["acrContainerImage"] = arvados_jobs_image(arvRunner,
+ submit_runner_image or "arvados/jobs:"+__version__,
+ runtimeContext)
if submit_runner_ram:
wf_runner_resources["ramMin"] = submit_runner_ram
+ # Remove a few redundant fields from the "job order" (aka input
+    # object or input parameters). When we're creating or updating a
+    # workflow record, any values in the job order get copied over as
+    # default values for input parameters.
+ adjustDirObjs(job_order, trim_listing)
+ adjustFileObjs(job_order, trim_anonymous_location)
+ adjustDirObjs(job_order, trim_anonymous_location)
+
newinputs = []
for i in main["inputs"]:
inp = {}
if hints:
wrapper["hints"] = hints
- # 1. check for SchemaDef
- # 2. do what pack does
- # 3. fix inputs
-
- doc = {"cwlVersion": "v1.2", "$graph": [wrapper]}
-
- if git_info:
- for g in git_info:
- doc[g] = git_info[g]
-
- #print("MMM", main["id"])
- #print(yamlloader.dump(wrapper, stream=sys.stdout))
+    # Schema definitions (which let you define things like record
+    # types) require special handling.
for i, r in enumerate(wrapper["requirements"]):
if r["class"] == "SchemaDefRequirement":
wrapper["requirements"][i] = fix_schemadef(r, main["id"], tool.doc_loader.expand_url, merged_map, jobmapper, col.portable_data_hash())
- # print()
- # print("merrrrged maaap", merged_map)
- # print()
- #print("update_refs", main["id"], runfile)
+ update_refs(arvRunner.api, wrapper, main["id"], tool.doc_loader.expand_url, merged_map, jobmapper, runtimeContext, main["id"]+"#", "#main/")
- #print(yamlloader.dump(wrapper, stream=sys.stdout))
+ doc = {"cwlVersion": "v1.2", "$graph": [wrapper]}
- update_refs(wrapper, main["id"], tool.doc_loader.expand_url, merged_map, jobmapper, False, runtimeContext, main["id"]+"#", "#main/")
+ if git_info:
+ for g in git_info:
+ doc[g] = git_info[g]
# Remove any lingering file references.
drop_ids(wrapper)
- #print("HHH")
-
- #print(yamlloader.dump(wrapper, stream=sys.stdout))
-
return doc
return call.execute(num_retries=arvRunner.num_retries)["uuid"]
-def upload_workflow(arvRunner, tool, job_order, project_uuid,
- runtimeContext, uuid=None,
- submit_runner_ram=0, name=None, merged_map=None,
- submit_runner_image=None,
- git_info=None):
-
- packed = packed_workflow(arvRunner, tool, merged_map, runtimeContext, git_info)
-
- adjustDirObjs(job_order, trim_listing)
- adjustFileObjs(job_order, trim_anonymous_location)
- adjustDirObjs(job_order, trim_anonymous_location)
-
- main = [p for p in packed["$graph"] if p["id"] == "#main"][0]
- for inp in main["inputs"]:
- sn = shortname(inp["id"])
- if sn in job_order:
- inp["default"] = job_order[sn]
-
- if not name:
- name = tool.tool.get("label", os.path.basename(tool.tool["id"]))
-
- upload_dependencies(arvRunner, name, tool.doc_loader,
- packed, tool.tool["id"],
- runtimeContext)
-
- wf_runner_resources = None
-
- hints = main.get("hints", [])
- found = False
- for h in hints:
- if h["class"] == "http://arvados.org/cwl#WorkflowRunnerResources":
- wf_runner_resources = h
- found = True
- break
- if not found:
- wf_runner_resources = {"class": "http://arvados.org/cwl#WorkflowRunnerResources"}
- hints.append(wf_runner_resources)
-
- wf_runner_resources["acrContainerImage"] = arvados_jobs_image(arvRunner,
- submit_runner_image or "arvados/jobs:"+__version__,
- runtimeContext)
-
- if submit_runner_ram:
- wf_runner_resources["ramMin"] = submit_runner_ram
-
- main["hints"] = hints
-
- wrapper = make_wrapper_workflow(arvRunner, main, packed, project_uuid, name, git_info, tool)
-
- body = {
- "workflow": {
- "name": name,
- "description": tool.tool.get("doc", ""),
- "definition": wrapper
- }}
- if project_uuid:
- body["workflow"]["owner_uuid"] = project_uuid
-
- if uuid:
- call = arvRunner.api.workflows().update(uuid=uuid, body=body)
- else:
- call = arvRunner.api.workflows().create(body=body)
- return call.execute(num_retries=arvRunner.num_retries)["uuid"]
-
def dedup_reqs(reqs):
dedup = {}
for r in reversed(reqs):
self.dynamic_resource_req = []
self.static_resource_req = []
self.wf_reffiles = []
- self.loadingContext = loadingContext
- super(ArvadosWorkflow, self).__init__(toolpath_object, loadingContext)
+ self.loadingContext = loadingContext.copy()
+
+ self.requirements = copy.deepcopy(getdefault(loadingContext.requirements, []))
+ tool_requirements = toolpath_object.get("requirements", [])
+ self.hints = copy.deepcopy(getdefault(loadingContext.hints, []))
+ tool_hints = toolpath_object.get("hints", [])
+
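+        # If a WorkflowRunnerResources hint names an acrContainerImage,
+        # record it as the default container image (assumed to apply to
+        # steps that don't declare their own DockerRequirement).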
+ workflow_runner_req, _ = self.get_requirement("http://arvados.org/cwl#WorkflowRunnerResources")
+ if workflow_runner_req and workflow_runner_req.get("acrContainerImage"):
+ self.loadingContext.default_docker_image = workflow_runner_req.get("acrContainerImage")
+
+ super(ArvadosWorkflow, self).__init__(toolpath_object, self.loadingContext)
self.cluster_target_req, _ = self.get_requirement("http://arvados.org/cwl#ClusterTarget")
def job(self, joborder, output_callback, runtimeContext):