Merge branch '15370-loopback-dispatchcloud'
diff --git a/sdk/cwl/arvados_cwl/executor.py b/sdk/cwl/arvados_cwl/executor.py
index eace9f449f9802b097fb5c8872fe51137f0a5271..778af58ac3f7a1b71c040d5ec4f3332ecba11964 100644
--- a/sdk/cwl/arvados_cwl/executor.py
+++ b/sdk/cwl/arvados_cwl/executor.py
@@ -31,8 +31,8 @@ from arvados.keep import KeepClient
 from arvados.errors import ApiError
 
 import arvados_cwl.util
-from .arvcontainer import RunnerContainer
-from .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps
+from .arvcontainer import RunnerContainer, cleanup_name_for_collection
+from .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps, make_builder
 from .arvtool import ArvadosCommandTool, validate_cluster_target, ArvadosExpressionTool
 from .arvworkflow import ArvadosWorkflow, upload_workflow
 from .fsaccess import CollectionFsAccess, CollectionFetcher, collectionResolver, CollectionCache, pdh_size
@@ -197,11 +197,11 @@ The 'jobs' API is no longer supported.
             handler = RuntimeStatusLoggingHandler(self.runtime_status_update)
             root_logger.addHandler(handler)
 
-        self.runtimeContext = ArvRuntimeContext(vars(arvargs))
-        self.runtimeContext.make_fs_access = partial(CollectionFsAccess,
+        self.toplevel_runtimeContext = ArvRuntimeContext(vars(arvargs))
+        self.toplevel_runtimeContext.make_fs_access = partial(CollectionFsAccess,
                                                      collection_cache=self.collection_cache)
 
-        validate_cluster_target(self, self.runtimeContext)
+        validate_cluster_target(self, self.toplevel_runtimeContext)
 
 
     def arv_make_tool(self, toolpath_object, loadingContext):
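The rename from `runtimeContext` to `toplevel_runtimeContext` makes explicit that this is the process-wide context built from the command-line arguments, distinct from the per-run copy taken in `arv_executor` later in this diff. The `partial` call pre-binds the shared collection cache so every `CollectionFsAccess` created downstream reuses it. A minimal sketch of that pattern, using a hypothetical stand-in class:

    from functools import partial

    class FsAccess:
        # Stand-in for CollectionFsAccess: a base directory plus a shared cache.
        def __init__(self, basedir, collection_cache=None):
            self.basedir = basedir
            self.collection_cache = collection_cache

    shared_cache = {}                 # one cache for the whole session
    make_fs_access = partial(FsAccess, collection_cache=shared_cache)

    # Callers only supply the base directory; the cache is already bound.
    fs = make_fs_access("/tmp/work")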
@@ -404,7 +404,7 @@ The 'jobs' API is no longer supported.
                 with SourceLine(obj, i, UnsupportedRequirement, logger.isEnabledFor(logging.DEBUG)):
                     self.check_features(v, parentfield=parentfield)
 
-    def make_output_collection(self, name, storage_classes, tagsString, outputObj):
+    def make_output_collection(self, name, storage_classes, tagsString, output_properties, outputObj):
         outputObj = copy.deepcopy(outputObj)
 
         files = []
@@ -456,7 +456,9 @@ The 'jobs' API is no longer supported.
             res = str(json.dumps(outputObj, sort_keys=True, indent=4, separators=(',',': '), ensure_ascii=False))
             f.write(res)
 
-        final.save_new(name=name, owner_uuid=self.project_uuid, storage_classes=storage_classes, ensure_unique_name=True)
+
+        final.save_new(name=name, owner_uuid=self.project_uuid, storage_classes=storage_classes,
+                       ensure_unique_name=True, properties=output_properties)
 
         logger.info("Final output collection %s \"%s\" (%s)", final.portable_data_hash(),
                     final.api_response()["name"],
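Together with the new `output_properties` parameter added to `make_output_collection` above, this lets the final output collection be created with arbitrary key/value properties attached at save time rather than patched in afterwards. In isolation the call looks roughly like this (a sketch; it assumes a configured API client, and the property values are illustrative):

    import arvados
    import arvados.collection

    api = arvados.api("v1")
    final = arvados.collection.Collection(api_client=api)
    with final.open("cwl.output.json", "w") as f:
        f.write("{}\n")

    final.save_new(name="Output from workflow example",
                   owner_uuid=None,                    # None: the user's home project
                   storage_classes=["default"],
                   ensure_unique_name=True,
                   properties={"sequencing_run": "R123"})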
@@ -486,6 +488,7 @@ The 'jobs' API is no longer supported.
                 self.api.containers().update(uuid=current['uuid'],
                                              body={
                                                  'output': self.final_output_collection.portable_data_hash(),
+                                                 'output_properties': self.final_output_collection.get_properties(),
                                              }).execute(num_retries=self.num_retries)
                 self.api.collections().update(uuid=self.final_output_collection.manifest_locator(),
                                               body={
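The container record is updated to carry the same evaluated properties, so the API server's view of the run's output matches the collection. Stripped of the surrounding state, the update looks like this (UUID and retry count hypothetical):

    api.containers().update(
        uuid="zzzzz-dz642-0123456789abcde",            # current container
        body={
            "output": final.portable_data_hash(),
            "output_properties": final.get_properties(),
        },
    ).execute(num_retries=3)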
@@ -517,7 +520,6 @@ The 'jobs' API is no longer supported.
 
         updated_tool.visit(self.check_features)
 
-        self.project_uuid = runtimeContext.project_uuid
         self.pipeline = None
         self.fs_access = runtimeContext.make_fs_access(runtimeContext.basedir)
         self.secret_store = runtimeContext.secret_store
@@ -535,6 +537,8 @@ The 'jobs' API is no longer supported.
         if runtimeContext.submit_request_uuid and self.work_api != "containers":
             raise Exception("--submit-request-uuid requires containers API, but using '{}' api".format(self.work_api))
 
+        runtimeContext = runtimeContext.copy()
+
         default_storage_classes = ",".join([k for k,v in self.api.config().get("StorageClasses", {"default": {"Default": True}}).items() if v.get("Default") is True])
         if runtimeContext.storage_classes == "default":
             runtimeContext.storage_classes = default_storage_classes
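Taking the copy here, at the top of `arv_executor`, rather than further down (see the removed `runtimeContext.copy()` later in this diff) keeps all the mutations that follow, storage classes, workflow defaults, project UUID, off the caller's object. The default storage class lookup collects every class whose `Default` flag is true in the cluster config, e.g.:

    config = {"StorageClasses": {
        "default": {"Default": True},
        "archive": {"Default": False},
        "fast":    {"Default": True},
    }}
    default_storage_classes = ",".join(
        k for k, v in config.get("StorageClasses", {"default": {"Default": True}}).items()
        if v.get("Default") is True)
    print(default_storage_classes)    # -> "default,fast"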
@@ -544,19 +548,25 @@ The 'jobs' API is no longer supported.
         if not runtimeContext.name:
             runtimeContext.name = self.name = updated_tool.tool.get("label") or updated_tool.metadata.get("label") or os.path.basename(updated_tool.tool["id"])
 
-        if self.runtimeContext.copy_deps is None and (runtimeContext.create_workflow or runtimeContext.update_workflow):
-            self.runtimeContext.copy_deps = True
+        if runtimeContext.copy_deps is None and (runtimeContext.create_workflow or runtimeContext.update_workflow):
+            # When creating or updating a workflow record, by default
+            # always copy dependencies and ensure Docker images are up
+            # to date.
+            runtimeContext.copy_deps = True
+            runtimeContext.match_local_docker = True
 
-        if self.runtimeContext.update_workflow and self.project_uuid is None:
+        if runtimeContext.update_workflow and self.project_uuid is None:
             # If we are updating a workflow, make sure anything that
             # gets uploaded goes into the same parent project, unless
             # an alternate --project-uuid was provided.
             existing_wf = self.api.workflows().get(uuid=runtimeContext.update_workflow).execute()
-            self.project_uuid = existing_wf["owner_uuid"]
+            runtimeContext.project_uuid = existing_wf["owner_uuid"]
+
+        self.project_uuid = runtimeContext.project_uuid
 
         # Upload local file references in the job order.
         job_order = upload_job_order(self, "%s input" % runtimeContext.name,
-                                     updated_tool, job_order)
+                                     updated_tool, job_order, runtimeContext)
 
         # the last clause means: if it is a command line tool, and we
         # are going to wait for the result, and always_submit_runner
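Setting `runtimeContext.project_uuid` first, and only then mirroring it into `self.project_uuid`, means every later helper sees the same destination project. The effective precedence is: an explicit --project-uuid, else the owner of the workflow being updated, else the user's home project. As a standalone helper this would read (hypothetical, for illustration):

    def resolve_project_uuid(api, project_uuid, update_workflow_uuid):
        # An explicit --project-uuid always wins.
        if project_uuid is not None:
            return project_uuid
        # Otherwise inherit the owner of the workflow being updated, so
        # uploaded dependencies land next to the workflow record itself.
        if update_workflow_uuid:
            wf = api.workflows().get(uuid=update_workflow_uuid).execute()
            return wf["owner_uuid"]
        return None   # None means the user's home project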
@@ -583,7 +593,7 @@ The 'jobs' API is no longer supported.
 
         # Upload direct dependencies of workflow steps, get back mapping of files to keep references.
         # Also uploads docker images.
-        merged_map = upload_workflow_deps(self, tool)
+        merged_map = upload_workflow_deps(self, tool, runtimeContext)
 
         # Recreate process object (ArvadosWorkflow or
         # ArvadosCommandTool) because tool document may have been
@@ -598,12 +608,13 @@ The 'jobs' API is no longer supported.
             # Create a pipeline template or workflow record and exit.
             if self.work_api == "containers":
                 uuid = upload_workflow(self, tool, job_order,
-                                        self.project_uuid,
-                                        uuid=runtimeContext.update_workflow,
-                                        submit_runner_ram=runtimeContext.submit_runner_ram,
-                                        name=runtimeContext.name,
-                                        merged_map=merged_map,
-                                        submit_runner_image=runtimeContext.submit_runner_image)
+                                       runtimeContext.project_uuid,
+                                       runtimeContext,
+                                       uuid=runtimeContext.update_workflow,
+                                       submit_runner_ram=runtimeContext.submit_runner_ram,
+                                       name=runtimeContext.name,
+                                       merged_map=merged_map,
+                                       submit_runner_image=runtimeContext.submit_runner_image)
                 self.stdout.write(uuid + "\n")
                 return (None, "success")
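`upload_workflow` now receives the runtime context as well, so the register-and-exit path honors the `copy_deps`/`match_local_docker` defaults set above. Command-line behavior is unchanged: running `arvados-cwl-runner --create-workflow wf.cwl` prints the new workflow record's UUID on stdout and returns without executing anything.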
 
@@ -612,11 +623,15 @@ The 'jobs' API is no longer supported.
         self.ignore_docker_for_reuse = runtimeContext.ignore_docker_for_reuse
         self.eval_timeout = runtimeContext.eval_timeout
 
-        runtimeContext = runtimeContext.copy()
         runtimeContext.use_container = True
         runtimeContext.tmpdir_prefix = "tmp"
         runtimeContext.work_api = self.work_api
 
+        if not self.output_name:
+            self.output_name = "Output from workflow %s" % runtimeContext.name
+
+        self.output_name = cleanup_name_for_collection(self.output_name)
+
         if self.work_api == "containers":
             if self.ignore_docker_for_reuse:
                 raise Exception("--ignore-docker-for-reuse not supported with containers API.")
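The default output collection name is now derived from the run name and set up front; the old fallback based on the tool's short name is deleted in a later hunk. `cleanup_name_for_collection` comes from arvcontainer (see the import change at the top). A sketch of its presumed effect, assuming it substitutes characters such as '/' that are problematic in collection names:

    def cleanup_name_for_collection(name):
        # Assumed behavior; the real implementation lives in arvcontainer.py.
        return name.replace("/", " ")

    print(cleanup_name_for_collection("Output from workflow a/b"))
    # -> "Output from workflow a b"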
@@ -769,8 +784,6 @@ The 'jobs' API is no longer supported.
             if workbench2 or workbench1:
                 logger.info("Output at %scollections/%s", workbench2 or workbench1, tool.final_output)
         else:
-            if self.output_name is None:
-                self.output_name = "Output of %s" % (shortname(tool.tool["id"]))
             if self.output_tags is None:
                 self.output_tags = ""
 
@@ -781,7 +794,16 @@ The 'jobs' API is no longer supported.
             else:
                 storage_classes = runtimeContext.storage_classes.strip().split(",")
 
-            self.final_output, self.final_output_collection = self.make_output_collection(self.output_name, storage_classes, self.output_tags, self.final_output)
+            output_properties = {}
+            output_properties_req, _ = tool.get_requirement("http://arvados.org/cwl#OutputCollectionProperties")
+            if output_properties_req:
+                builder = make_builder(job_order, tool.hints, tool.requirements, runtimeContext, tool.metadata)
+                for pr in output_properties_req["outputProperties"]:
+                    output_properties[pr["propertyName"]] = builder.do_eval(pr["propertyValue"])
+
+            self.final_output, self.final_output_collection = self.make_output_collection(self.output_name, storage_classes,
+                                                                                          self.output_tags, output_properties,
+                                                                                          self.final_output)
             self.set_crunch_output()
 
         if runtimeContext.compute_checksum:
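This last hunk is the heart of the feature: a workflow can declare an `http://arvados.org/cwl#OutputCollectionProperties` requirement whose `propertyValue` entries are CWL expressions evaluated against the job order, and the results become the properties passed through `make_output_collection` above. A self-contained sketch of the evaluation loop, with a hand-rolled stand-in for the builder returned by `make_builder` (the stand-in only handles simple `$(inputs.x)` references; real CWL expression evaluation is much richer):

    # Hypothetical requirement, as it would appear in tool.requirements:
    output_properties_req = {
        "class": "http://arvados.org/cwl#OutputCollectionProperties",
        "outputProperties": [
            {"propertyName": "sample",  "propertyValue": "$(inputs.sample_id)"},
            {"propertyName": "release", "propertyValue": "2022-Q2"},
        ],
    }

    class FakeBuilder:
        # Stand-in for the cwltool Builder created by make_builder().
        def __init__(self, inputs):
            self.inputs = inputs

        def do_eval(self, expr):
            if expr.startswith("$(inputs.") and expr.endswith(")"):
                return self.inputs[expr[len("$(inputs."):-1]]
            return expr

    builder = FakeBuilder({"sample_id": "S42"})
    output_properties = {
        pr["propertyName"]: builder.do_eval(pr["propertyValue"])
        for pr in output_properties_req["outputProperties"]}
    print(output_properties)   # -> {'sample': 'S42', 'release': '2022-Q2'}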