+ doc = {"cwlVersion": "v1.2", "$graph": [wrapper]}
+
+ if git_info:
+ for g in git_info:
+ doc[g] = git_info[g]
+
+ return json.dumps(doc, sort_keys=True, indent=4, separators=(',',': '))
+
+
def rel_ref(s, baseuri, urlexpander, merged_map, jobmapper):
    """Turn reference *s* into a portable form relative to *baseuri*.

    Resolution order: keep: references pass through untouched; then
    previously-merged documents (merged_map) and the job mapper are
    consulted for a known replacement; finally the reference is rewritten
    as a filesystem-relative path between the two file URIs.
    """
    # keep: references are already location-independent.
    if s.startswith("keep:"):
        return s

    expanded = urlexpander(s, baseuri)
    if expanded.startswith("keep:"):
        return expanded

    # Strip any fragment to get the URI of the containing file.
    base_file = urllib.parse.urldefrag(baseuri)[0]

    # A replacement recorded for either the full baseuri or its file part wins.
    for candidate in (baseuri, base_file):
        if candidate in merged_map:
            resolved = merged_map[candidate].resolved
            if expanded in resolved:
                return resolved[expanded]

    if expanded in jobmapper:
        return jobmapper.mapper(expanded).target

    base_dir = os.path.dirname(uri_file_path(base_file))
    target_dir = os.path.dirname(uri_file_path(expanded))
    target_name = os.path.basename(uri_file_path(expanded))

    relative = os.path.relpath(target_dir, base_dir)
    if relative == ".":
        relative = ""

    return os.path.join(relative, target_name)
+
def is_basetype(tp):
    """Return True if *tp* names a CWL base type.

    A base type may optionally carry an array suffix ("[]") and/or an
    optional marker ("?"), e.g. "File[]", "string?", "int[]?".
    """
    basetypes = ("null", "boolean", "int", "long", "float", "double", "string", "File", "Directory", "record", "array", "enum")
    for b in basetypes:
        # fullmatch: re.match only anchors at the start, so names that
        # merely begin with a base type (e.g. "intervals" vs "int") were
        # wrongly accepted.  Raw string avoids the invalid "\[" escape.
        if re.fullmatch(b + r"(\[\])?\??", tp):
            return True
    return False
+
+
def update_refs(d, baseuri, urlexpander, merged_map, jobmapper, set_block_style, runtimeContext, prefix, replacePrefix):
    """Recursively walk document *d*, rewriting references in place.

    String references found in "location"/"run"/"name"/"$include"/"$import"
    fields, non-base "type"/"items" fields, "inputs" values and "$schemas"
    entries are rewritten through rel_ref().  *baseuri* is rebound as the
    walk descends past nodes carrying an "id"/"name".  When *prefix* is
    set, matching list-entry strings are rewritten to start with
    *replacePrefix* instead.  Mutates *d*; returns None.
    """
    # Force block (non-flow) style on comment-preserving YAML nodes
    # (CommentedSeq/CommentedMap).
    if set_block_style and (isinstance(d, CommentedSeq) or isinstance(d, CommentedMap)):
        d.fa.set_block_style()

    if isinstance(d, MutableSequence):
        for i, s in enumerate(d):
            if prefix and isinstance(s, str):
                # Plain string entries get the prefix substitution only.
                if s.startswith(prefix):
                    d[i] = replacePrefix+s[len(prefix):]
            else:
                update_refs(s, baseuri, urlexpander, merged_map, jobmapper, set_block_style, runtimeContext, prefix, replacePrefix)
    elif isinstance(d, MutableMapping):
        for field in ("id", "name"):
            if isinstance(d.get(field), str) and d[field].startswith("_:"):
                # blank node reference, was added in automatically, can get rid of it.
                del d[field]

        # An id/name on this node establishes the base URI for everything
        # nested beneath it.
        if "id" in d:
            baseuri = urlexpander(d["id"], baseuri, scoped_id=True)
        elif "name" in d and isinstance(d["name"], str):
            baseuri = urlexpander(d["name"], baseuri, scoped_id=True)

        if d.get("class") == "DockerRequirement":
            # Record the portable data hash for the image, as previously
            # cached on the runtime context (may be None on a cache miss).
            dockerImageId = d.get("dockerImageId") or d.get("dockerPull")
            d["http://arvados.org/cwl#dockerCollectionPDH"] = runtimeContext.cached_docker_lookups.get(dockerImageId)

        for field in d:
            if field in ("location", "run", "name") and isinstance(d[field], str):
                d[field] = rel_ref(d[field], baseuri, urlexpander, merged_map, jobmapper)
                continue

            if field in ("$include", "$import") and isinstance(d[field], str):
                # $include/$import are resolved without the merged map.
                d[field] = rel_ref(d[field], baseuri, urlexpander, {}, jobmapper)
                continue

            for t in ("type", "items"):
                if (field == t and
                    isinstance(d[t], str) and
                    not is_basetype(d[t])):
                    d[t] = rel_ref(d[t], baseuri, urlexpander, merged_map, jobmapper)
                    # NOTE: this continue applies to the inner "for t" loop,
                    # not the outer field loop; the node still falls through
                    # to the recursive call below, which is a no-op for a
                    # plain str (neither MutableSequence nor MutableMapping).
                    continue

            if field == "inputs" and isinstance(d["inputs"], MutableMapping):
                # Shorthand inputs map: values are either a bare type string
                # or a nested parameter mapping.
                for inp in d["inputs"]:
                    if isinstance(d["inputs"][inp], str) and not is_basetype(d["inputs"][inp]):
                        d["inputs"][inp] = rel_ref(d["inputs"][inp], baseuri, urlexpander, merged_map, jobmapper)
                    if isinstance(d["inputs"][inp], MutableMapping):
                        update_refs(d["inputs"][inp], baseuri, urlexpander, merged_map, jobmapper, set_block_style, runtimeContext, prefix, replacePrefix)
                continue

            if field == "$schemas":
                for n, s in enumerate(d["$schemas"]):
                    d["$schemas"][n] = rel_ref(d["$schemas"][n], baseuri, urlexpander, merged_map, jobmapper)
                continue

            update_refs(d[field], baseuri, urlexpander, merged_map, jobmapper, set_block_style, runtimeContext, prefix, replacePrefix)
+
+
def fix_schemadef(req, baseuri, urlexpander, merged_map, jobmapper, pdh):
    """Return a deep copy of a SchemaDefRequirement with type names rewritten.

    Each named type is re-pointed at its location inside the Keep
    collection identified by *pdh*, and that replacement is recorded in
    the resolved table of every merged_map entry (creating an entry for
    the type's source file if one does not exist yet).
    """
    # Work on a copy so the caller's requirement is left untouched.
    req = copy.deepcopy(req)

    for schema_type in req["types"]:
        type_id = schema_type["name"]
        source_file = urllib.parse.urldefrag(type_id)[0]
        relative = rel_ref(type_id, baseuri, urlexpander, merged_map, jobmapper)
        if source_file not in merged_map:
            merged_map[source_file] = FileUpdates({}, {})
        keep_ref = "keep:%s/%s" %(pdh, relative)
        # Every merged document must see the same replacement for this type.
        for entry in merged_map.values():
            entry.resolved[type_id] = keep_ref
    return req
+
def drop_ids(d):
    """Recursively delete "id" fields holding "file:" URIs from *d*.

    Walks nested sequences and mappings; mutates *d* in place and
    returns None.  Non-"file:" ids (e.g. keep: references) are kept.
    """
    if isinstance(d, MutableSequence):
        for item in d:
            drop_ids(item)
    elif isinstance(d, MutableMapping):
        # isinstance guard (matching the d.get() pattern used by
        # update_refs): an "id" holding a non-string value would
        # otherwise raise AttributeError on startswith().
        if isinstance(d.get("id"), str) and d["id"].startswith("file:"):
            del d["id"]

        # "id" is removed before this iteration starts, so the dict is
        # not mutated while being iterated.
        for field in d:
            drop_ids(d[field])
+