X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/62ede2cf371f51cbe8bac07c36ddf904e428262b..7bdffdeb9ccec113d1d9b848423be60d85a501ed:/sdk/cwl/arvados_cwl/arvworkflow.py

diff --git a/sdk/cwl/arvados_cwl/arvworkflow.py b/sdk/cwl/arvados_cwl/arvworkflow.py
index cc3a51d801..3ad2c6419a 100644
--- a/sdk/cwl/arvados_cwl/arvworkflow.py
+++ b/sdk/cwl/arvados_cwl/arvworkflow.py
@@ -29,7 +29,7 @@ from cwltool.load_tool import fetch_document, resolve_and_validate_document
 from cwltool.process import shortname, uniquename
 from cwltool.workflow import Workflow, WorkflowException, WorkflowStep
 from cwltool.utils import adjustFileObjs, adjustDirObjs, visit_class, normalizeFilesDirs
-from cwltool.context import LoadingContext
+from cwltool.context import LoadingContext, getdefault
 
 from schema_salad.ref_resolver import file_uri, uri_file_path
 
@@ -41,6 +41,8 @@ from .runner import (upload_dependencies, packed_workflow, upload_workflow_colle
 from .pathmapper import ArvPathMapper, trim_listing
 from .arvtool import ArvadosCommandTool, set_cluster_target
 from ._version import __version__
+from .util import common_prefix
+from .arvdocker import arv_docker_get_image
 
 from .perf import Perf
 
@@ -50,6 +52,21 @@ metrics = logging.getLogger('arvados.cwl-runner.metrics')
 max_res_pars = ("coresMin", "coresMax", "ramMin", "ramMax", "tmpdirMin", "tmpdirMax")
 sum_res_pars = ("outdirMin", "outdirMax")
 
+_basetype_re = re.compile(r'''(?:
+Directory
+|File
+|array
+|boolean
+|double
+|enum
+|float
+|int
+|long
+|null
+|record
+|string
+)(?:\[\])?\??''', re.VERBOSE)
+
 def make_wrapper_workflow(arvRunner, main, packed, project_uuid, name, git_info, tool):
 
     col = arvados.collection.Collection(api_client=arvRunner.api, keep_client=arvRunner.keep_client)
@@ -160,24 +177,16 @@ def rel_ref(s, baseuri, urlexpander, merged_map, jobmapper):
     return os.path.join(r, p3)
 
 def is_basetype(tp):
-    basetypes = ("null", "boolean", "int", "long", "float", "double", "string", "File", "Directory", "record", "array", "enum")
-    for b in basetypes:
-        if re.match(b+"(\[\])?\??", tp):
-            return True
-    return False
-
-
-def update_refs(d, baseuri, urlexpander, merged_map, jobmapper, set_block_style, runtimeContext, prefix, replacePrefix):
-    if set_block_style and (isinstance(d, CommentedSeq) or isinstance(d, CommentedMap)):
-        d.fa.set_block_style()
+    return _basetype_re.match(tp) is not None
 
+def update_refs(api, d, baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix):
     if isinstance(d, MutableSequence):
         for i, s in enumerate(d):
             if prefix and isinstance(s, str):
                 if s.startswith(prefix):
                     d[i] = replacePrefix+s[len(prefix):]
             else:
-                update_refs(s, baseuri, urlexpander, merged_map, jobmapper, set_block_style, runtimeContext, prefix, replacePrefix)
+                update_refs(api, s, baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix)
     elif isinstance(d, MutableMapping):
         for field in ("id", "name"):
             if isinstance(d.get(field), str) and d[field].startswith("_:"):
@@ -190,8 +199,8 @@ def update_refs(d, baseuri, urlexpander, merged_map, jobmapper, set_block_style,
                 baseuri = urlexpander(d["name"], baseuri, scoped_id=True)
 
         if d.get("class") == "DockerRequirement":
-            dockerImageId = d.get("dockerImageId") or d.get("dockerPull")
-            d["http://arvados.org/cwl#dockerCollectionPDH"] = runtimeContext.cached_docker_lookups.get(dockerImageId)
+            d["http://arvados.org/cwl#dockerCollectionPDH"] = arv_docker_get_image(api, d, False,
+                                                                                   runtimeContext)
 
         for field in d:
             if field in ("location", "run", "name") and isinstance(d[field], str):
@@ -214,15 +223,21 @@ def update_refs(d, baseuri, urlexpander, merged_map, jobmapper, set_block_style,
                     if isinstance(d["inputs"][inp], str) and not is_basetype(d["inputs"][inp]):
                         d["inputs"][inp] = rel_ref(d["inputs"][inp], baseuri, urlexpander, merged_map, jobmapper)
                     if isinstance(d["inputs"][inp], MutableMapping):
-                        update_refs(d["inputs"][inp], baseuri, urlexpander, merged_map, jobmapper, set_block_style, runtimeContext, prefix, replacePrefix)
+                        update_refs(api, d["inputs"][inp], baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix)
                 continue
 
+            if field in ("requirements", "hints") and isinstance(d[field], MutableMapping):
+                dr = d[field].get("DockerRequirement")
+                if dr:
+                    dr["http://arvados.org/cwl#dockerCollectionPDH"] = arv_docker_get_image(api, dr, False,
+                                                                                            runtimeContext)
+
             if field == "$schemas":
                 for n, s in enumerate(d["$schemas"]):
                     d["$schemas"][n] = rel_ref(d["$schemas"][n], baseuri, urlexpander, merged_map, jobmapper)
                 continue
 
-            update_refs(d[field], baseuri, urlexpander, merged_map, jobmapper, set_block_style, runtimeContext, prefix, replacePrefix)
+            update_refs(api, d[field], baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix)
 
 
 def fix_schemadef(req, baseuri, urlexpander, merged_map, jobmapper, pdh):
@@ -238,6 +253,7 @@ def fix_schemadef(req, baseuri, urlexpander, merged_map, jobmapper, pdh):
             merged_map[mm].resolved[r] = rename
     return req
 
+
 def drop_ids(d):
     if isinstance(d, MutableSequence):
         for i, s in enumerate(d):
@@ -264,6 +280,10 @@ def upload_workflow(arvRunner, tool, job_order, project_uuid,
     import_files = set()
     include_files = set()
 
+    # The document loader index will have entries for all the files
+    # that were loaded in the process of parsing the entire workflow
+    # (including subworkflows, tools, imports, etc.).  We use this to
+    # compose a list of the workflow file dependencies.
     for w in tool.doc_loader.idx:
         if w.startswith("file://"):
             workflow_files.add(urllib.parse.urldefrag(w)[0])
@@ -276,27 +296,26 @@ def upload_workflow(arvRunner, tool, job_order, project_uuid,
 
     all_files = workflow_files | import_files | include_files
 
-    n = 7
-    allmatch = True
-    if firstfile:
-        while allmatch:
-            n += 1
-            for f in all_files:
-                if len(f)-1 < n:
-                    n -= 1
-                    allmatch = False
-                    break
-                if f[n] != firstfile[n]:
-                    allmatch = False
-                    break
-
-        while firstfile[n] != "/":
-            n -= 1
+    # Find the longest common prefix among all the file names.  We'll
+    # use this to recreate the directory structure in a keep
+    # collection with correct relative references.
+    prefix = common_prefix(firstfile, all_files) if firstfile else ""
+
     col = arvados.collection.Collection(api_client=arvRunner.api)
 
+    # Now go through all the files and update references to other
+    # files.  We previously scanned for file dependencies; these
+    # are passed in as merged_map.
+    #
+    # Note about merged_map: we upload dependencies of each process
+    # object (CommandLineTool/Workflow) to a separate collection.
+    # That way, when the user edits something, this limits collection
+    # PDH changes to just that tool, and minimizes situations where
+    # small changes break container reuse for the whole workflow.
+    #
     for w in workflow_files | import_files:
-        # 1. load YAML
+        # 1. load the YAML file
 
         text = tool.doc_loader.fetch_text(w)
         if isinstance(text, bytes):
@@ -307,25 +326,33 @@ def upload_workflow(arvRunner, tool, job_order, project_uuid,
         yamlloader = schema_salad.utils.yaml_no_ts()
         result = yamlloader.load(textIO)
 
-        set_block_style = False
-        if result.fa.flow_style():
-            set_block_style = True
+        # If the whole document is in "flow style" it is probably JSON
+        # formatted.  We'll re-export it as JSON because ruamel.yaml
+        # round-trip mode only preserves "block style" formatting, not
+        # "flow style" formatting.
+        export_as_json = result.fa.flow_style()
 
         # 2. find $import, $include, $schema, run, location
         # 3. update field value
-        update_refs(result, w, tool.doc_loader.expand_url, merged_map, jobmapper, set_block_style, runtimeContext, "", "")
+        update_refs(arvRunner.api, result, w, tool.doc_loader.expand_url, merged_map, jobmapper, runtimeContext, "", "")
 
-        with col.open(w[n+1:], "wt") as f:
-            # yamlloader.dump(result, stream=sys.stdout)
-            yamlloader.dump(result, stream=f)
+        # Write the updated file to the collection.
+        with col.open(w[len(prefix):], "wt") as f:
+            if export_as_json:
+                json.dump(result, f, indent=4, separators=(',',': '))
+            else:
+                yamlloader.dump(result, stream=f)
 
-        with col.open(os.path.join("original", w[n+1:]), "wt") as f:
+        # Also store a verbatim copy of the original files.
+        with col.open(os.path.join("original", w[len(prefix):]), "wt") as f:
             f.write(text)
 
+    # Upload files referenced by $include directives; these are used
+    # unchanged and don't need to be updated.
     for w in include_files:
-        with col.open(w[n+1:], "wb") as f1:
-            with col.open(os.path.join("original", w[n+1:]), "wb") as f3:
+        with col.open(w[len(prefix):], "wb") as f1:
+            with col.open(os.path.join("original", w[len(prefix):]), "wb") as f3:
                 with open(uri_file_path(w), "rb") as f2:
                     dat = f2.read(65536)
                     while dat:
@@ -333,12 +360,13 @@
                        f1.write(dat)
                        f3.write(dat)
                        dat = f2.read(65536)
 
+    # Now collect metadata: the collection name and git properties.
    toolname = tool.tool.get("label") or tool.metadata.get("label") or os.path.basename(tool.tool["id"])
    if git_info and git_info.get("http://arvados.org/cwl#gitDescribe"):
        toolname = "%s (%s)" % (toolname, git_info.get("http://arvados.org/cwl#gitDescribe"))
 
-    toolfile = tool.tool["id"][n+1:]
+    toolfile = tool.tool["id"][len(prefix):]
 
    properties = {
        "type": "workflow",
@@ -350,19 +378,21 @@
            p = g.split("#", 1)[1]
            properties["arv:"+p] = git_info[g]
 
+    # Check if a collection with the same content already exists in the target project.  If so, just use that one.
    existing = arvRunner.api.collections().list(filters=[["portable_data_hash", "=", col.portable_data_hash()],
                                                         ["owner_uuid", "=", arvRunner.project_uuid]]).execute(num_retries=arvRunner.num_retries)
+
    if len(existing["items"]) == 0:
+        toolname = toolname.replace("/", " ")
        col.save_new(name=toolname, owner_uuid=arvRunner.project_uuid, ensure_unique_name=True, properties=properties)
        logger.info("Workflow uploaded to %s", col.manifest_locator())
    else:
        logger.info("Workflow uploaded to %s", existing["items"][0]["uuid"])
 
-    adjustDirObjs(job_order, trim_listing)
-    adjustFileObjs(job_order, trim_anonymous_location)
-    adjustDirObjs(job_order, trim_anonymous_location)
-
-    # now construct the wrapper
+    # Now that we've updated the workflow and saved it to a
+    # collection, we're going to construct a minimal "wrapper"
+    # workflow which consists only of input and output parameters
+    # connected to a single step that runs the real workflow.
 
    runfile = "keep:%s/%s" % (col.portable_data_hash(), toolfile)
 
@@ -389,13 +419,22 @@
        wf_runner_resources = {"class": "http://arvados.org/cwl#WorkflowRunnerResources"}
        hints.append(wf_runner_resources)
 
-    wf_runner_resources["acrContainerImage"] = arvados_jobs_image(arvRunner,
-                                                                  submit_runner_image or "arvados/jobs:"+__version__,
-                                                                  runtimeContext)
+    if "acrContainerImage" not in wf_runner_resources:
+        wf_runner_resources["acrContainerImage"] = arvados_jobs_image(arvRunner,
+                                                                      submit_runner_image or "arvados/jobs:"+__version__,
+                                                                      runtimeContext)
 
    if submit_runner_ram:
        wf_runner_resources["ramMin"] = submit_runner_ram
 
+    # Remove a few redundant fields from the "job order" (aka input
+    # object or input parameters).  In the situation where we're
+    # creating or updating a workflow record, any values in the job
+    # order get copied over as default values for input parameters.
+    adjustDirObjs(job_order, trim_listing)
+    adjustFileObjs(job_order, trim_anonymous_location)
+    adjustDirObjs(job_order, trim_anonymous_location)
+
    newinputs = []
    for i in main["inputs"]:
        inp = {}
@@ -446,9 +485,14 @@
    if hints:
        wrapper["hints"] = hints
 
-    # 1. check for SchemaDef
-    # 2. do what pack does
-    # 3. fix inputs
+    # Schema definitions (this lets you define things like record
+    # types) require special handling.
+
+    for i, r in enumerate(wrapper["requirements"]):
+        if r["class"] == "SchemaDefRequirement":
+            wrapper["requirements"][i] = fix_schemadef(r, main["id"], tool.doc_loader.expand_url, merged_map, jobmapper, col.portable_data_hash())
+
+    update_refs(arvRunner.api, wrapper, main["id"], tool.doc_loader.expand_url, merged_map, jobmapper, runtimeContext, main["id"]+"#", "#main/")
 
    doc = {"cwlVersion": "v1.2", "$graph": [wrapper]}
 
@@ -456,12 +500,6 @@
        for g in git_info:
            doc[g] = git_info[g]
 
-    for i, r in enumerate(wrapper["requirements"]):
-        if r["class"] == "SchemaDefRequirement":
-            wrapper["requirements"][i] = fix_schemadef(r, main["id"], tool.doc_loader.expand_url, merged_map, jobmapper, col.portable_data_hash())
-
-    update_refs(wrapper, main["id"], tool.doc_loader.expand_url, merged_map, jobmapper, False, runtimeContext, main["id"]+"#", "#main/")
-
    # Remove any lingering file references.
    drop_ids(wrapper)
@@ -564,8 +602,18 @@ class ArvadosWorkflow(Workflow):
        self.dynamic_resource_req = []
        self.static_resource_req = []
        self.wf_reffiles = []
-        self.loadingContext = loadingContext
-        super(ArvadosWorkflow, self).__init__(toolpath_object, loadingContext)
+        self.loadingContext = loadingContext.copy()
+
+        self.requirements = copy.deepcopy(getdefault(loadingContext.requirements, []))
+        tool_requirements = toolpath_object.get("requirements", [])
+        self.hints = copy.deepcopy(getdefault(loadingContext.hints, []))
+        tool_hints = toolpath_object.get("hints", [])
+
+        workflow_runner_req, _ = self.get_requirement("http://arvados.org/cwl#WorkflowRunnerResources")
+        if workflow_runner_req and workflow_runner_req.get("acrContainerImage"):
+            self.loadingContext.default_docker_image = workflow_runner_req.get("acrContainerImage")
+
+        super(ArvadosWorkflow, self).__init__(toolpath_object, self.loadingContext)
        self.cluster_target_req, _ = self.get_requirement("http://arvados.org/cwl#ClusterTarget")
 
    def job(self, joborder, output_callback, runtimeContext):
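
Note (illustration, not part of the patch): the new _basetype_re pattern and is_basetype() prefix-match a CWL base type name optionally followed by "[]" (array) and/or "?" (optional), mirroring the loop they replace. A minimal, self-contained sketch of what the helper accepts:

    import re

    # Same pattern the patch adds to arvworkflow.py; re.VERBOSE ignores the
    # whitespace used here for layout.
    _basetype_re = re.compile(r'''(?:
    Directory
    |File
    |array
    |boolean
    |double
    |enum
    |float
    |int
    |long
    |null
    |record
    |string
    )(?:\[\])?\??''', re.VERBOSE)

    def is_basetype(tp):
        return _basetype_re.match(tp) is not None

    assert is_basetype("File")          # plain base type
    assert is_basetype("string[]")      # array of a base type
    assert is_basetype("int?")          # optional base type
    assert not is_basetype("MyRecord")  # schema-defined types are not base types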
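
Note (assumption, not part of the patch): common_prefix() is imported from .util and its implementation is not shown in this diff. Judging from the inline loop it replaces and the later w[len(prefix):] slicing, it presumably returns the longest leading path shared by every file URI, cut back to a "/" boundary and including that trailing slash. A rough sketch of that presumed behavior, with a hypothetical example:

    def common_prefix_sketch(firstfile, all_files):
        # Longest prefix of firstfile that ends with "/" and that every
        # file name starts with; "" if there is no such prefix.
        n = len(firstfile)
        while n > 0:
            candidate = firstfile[:n]
            if candidate.endswith("/") and all(f.startswith(candidate) for f in all_files):
                return candidate
            n -= 1
        return ""

    files = {"file:///home/user/wf/wf.cwl", "file:///home/user/wf/tools/tool.cwl"}
    # -> "file:///home/user/wf/", so wf.cwl and tools/tool.cwl keep their
    # relative layout when written into the Keep collection.
    print(common_prefix_sketch("file:///home/user/wf/wf.cwl", files))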