X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/6358388ad9f7676aa3b1ab149c9fbde4716929e5..cf9afd31cf9bc682ea3f8c0d59012fa7080b0e9b:/sdk/cwl/arvados_cwl/arvcontainer.py?ds=sidebyside

diff --git a/sdk/cwl/arvados_cwl/arvcontainer.py b/sdk/cwl/arvados_cwl/arvcontainer.py
index c85443a23a..ec9c52c59a 100644
--- a/sdk/cwl/arvados_cwl/arvcontainer.py
+++ b/sdk/cwl/arvados_cwl/arvcontainer.py
@@ -146,6 +146,8 @@ class ArvadosContainer(JobBase):
                     mounts[targetdir]["path"] = path
             prevdir = targetdir + "/"
 
+        intermediate_collection_info = arvados_cwl.util.get_intermediate_collection_info(self.name, runtimeContext.current_container, runtimeContext.intermediate_output_ttl)
+
         with Perf(metrics, "generatefiles %s" % self.name):
             if self.generatefiles["listing"]:
                 vwd = arvados.collection.Collection(api_client=self.arvrunner.api,
@@ -197,12 +199,11 @@ class ArvadosContainer(JobBase):
 
                 if not runtimeContext.current_container:
                     runtimeContext.current_container = arvados_cwl.util.get_current_container(self.arvrunner.api, self.arvrunner.num_retries, logger)
-                info = arvados_cwl.util.get_intermediate_collection_info(self.name, runtimeContext.current_container, runtimeContext.intermediate_output_ttl)
-                vwd.save_new(name=info["name"],
+                vwd.save_new(name=intermediate_collection_info["name"],
                              owner_uuid=runtimeContext.project_uuid,
                              ensure_unique_name=True,
-                             trash_at=info["trash_at"],
-                             properties=info["properties"])
+                             trash_at=intermediate_collection_info["trash_at"],
+                             properties=intermediate_collection_info["properties"])
 
                 prev = None
                 for f, p in sorteditems:
@@ -247,7 +248,8 @@ class ArvadosContainer(JobBase):
                                                                     runtimeContext.project_uuid,
                                                                     runtimeContext.force_docker_pull,
                                                                     runtimeContext.tmp_outdir_prefix,
-                                                                    runtimeContext.match_local_docker)
+                                                                    runtimeContext.match_local_docker,
+                                                                    runtimeContext.copy_deps)
 
         network_req, _ = self.get_requirement("NetworkAccess")
         if network_req:
@@ -318,7 +320,7 @@ class ArvadosContainer(JobBase):
         if runtimeContext.submit_runner_cluster:
             extra_submit_params["cluster_id"] = runtimeContext.submit_runner_cluster
 
-        container_request["output_name"] = "Output for step %s" % (self.name)
+        container_request["output_name"] = "Output from step %s" % (self.name)
         container_request["output_ttl"] = self.output_ttl
         container_request["mounts"] = mounts
         container_request["secret_mounts"] = secret_mounts
@@ -340,6 +342,16 @@ class ArvadosContainer(JobBase):
             for pr in properties_req["processProperties"]:
                 container_request["properties"][pr["propertyName"]] = self.builder.do_eval(pr["propertyValue"])
 
+        output_properties_req, _ = self.get_requirement("http://arvados.org/cwl#OutputCollectionProperties")
+        if output_properties_req:
+            if self.arvrunner.api._rootDesc["revision"] >= "20220510":
+                container_request["output_properties"] = {}
+                for pr in output_properties_req["outputProperties"]:
+                    container_request["output_properties"][pr["propertyName"]] = self.builder.do_eval(pr["propertyValue"])
+            else:
+                logger.warning("%s API server is too old to support setting properties on output collections.",
+                               self.arvrunner.label(self))
+
         if runtimeContext.runnerjob.startswith("arvwf:"):
             wfuuid = runtimeContext.runnerjob[6:runtimeContext.runnerjob.index("#")]
             wfrecord = self.arvrunner.api.workflows().get(uuid=wfuuid).execute(num_retries=self.arvrunner.num_retries)
@@ -394,7 +406,7 @@ class ArvadosContainer(JobBase):
                     processStatus = "permanentFail"
 
                 if rcode == 137:
-                    logger.warning("%s job was killed on the compute instance. The most common reason is that it attempted to allocate too much RAM and was targeted by the Out Of Memory (OOM) killer. Try resubmitting with a higher 'ramMin'.",
+                    logger.warning("%s Container may have been killed for using too much RAM. Try resubmitting with a higher 'ramMin'.",
                                    self.arvrunner.label(self))
             else:
                 processStatus = "permanentFail"
@@ -465,7 +477,7 @@ class RunnerContainer(Runner):
             "cwd": "/var/spool/cwl",
             "priority": self.priority,
             "state": "Committed",
-            "container_image": arvados_jobs_image(self.arvrunner, self.jobs_image),
+            "container_image": arvados_jobs_image(self.arvrunner, self.jobs_image, runtimeContext),
             "mounts": {
                 "/var/lib/cwl/cwl.input.json": {
                     "kind": "json",
@@ -500,7 +512,7 @@ class RunnerContainer(Runner):
                 "portable_data_hash": "%s" % workflowcollection
             }
         else:
-            packed = packed_workflow(self.arvrunner, self.embedded_tool, self.merged_map)
+            packed = packed_workflow(self.arvrunner, self.embedded_tool, self.merged_map, runtimeContext)
             workflowpath = "/var/lib/cwl/workflow.json#main"
             container_req["mounts"]["/var/lib/cwl/workflow.json"] = {
                 "kind": "json",
@@ -550,17 +562,17 @@ class RunnerContainer(Runner):
         if runtimeContext.intermediate_storage_classes != "default" and runtimeContext.intermediate_storage_classes:
             command.append("--intermediate-storage-classes=" + runtimeContext.intermediate_storage_classes)
 
-        if self.on_error:
+        if runtimeContext.on_error:
             command.append("--on-error=" + self.on_error)
 
-        if self.intermediate_output_ttl:
-            command.append("--intermediate-output-ttl=%d" % self.intermediate_output_ttl)
+        if runtimeContext.intermediate_output_ttl:
+            command.append("--intermediate-output-ttl=%d" % runtimeContext.intermediate_output_ttl)
 
-        if self.arvrunner.trash_intermediate:
+        if runtimeContext.trash_intermediate:
             command.append("--trash-intermediate")
 
-        if self.arvrunner.project_uuid:
-            command.append("--project-uuid="+self.arvrunner.project_uuid)
+        if runtimeContext.project_uuid:
+            command.append("--project-uuid="+runtimeContext.project_uuid)
 
         if self.enable_dev:
             command.append("--enable-dev")
@@ -581,8 +593,8 @@ class RunnerContainer(Runner):
     def run(self, runtimeContext):
         runtimeContext.keepprefix = "keep:"
         job_spec = self.arvados_job_spec(runtimeContext)
-        if self.arvrunner.project_uuid:
-            job_spec["owner_uuid"] = self.arvrunner.project_uuid
+        if runtimeContext.project_uuid:
+            job_spec["owner_uuid"] = runtimeContext.project_uuid
 
         extra_submit_params = {}
         if runtimeContext.submit_runner_cluster: