Merge branch 'master' into 16811-public-favs
[arvados.git] sdk/cwl/arvados_cwl/arvcontainer.py
index 03b4e07c76f5849a97ae85b9bd179e897ec8fc33..99d82f3398332d10e0ac0cbf95decb0d5870bd5a 100644
@@ -33,6 +33,7 @@ from .runner import Runner, arvados_jobs_image, packed_workflow, trim_anonymous_
 from .fsaccess import CollectionFetcher
 from .pathmapper import NoFollowPathMapper, trim_listing
 from .perf import Perf
+from ._version import __version__
 
 logger = logging.getLogger('arvados.cwl-runner')
 metrics = logging.getLogger('arvados.cwl-runner.metrics')
@@ -123,6 +124,8 @@ class ArvadosContainer(JobBase):
                 "kind": "collection",
                 "portable_data_hash": pdh
             }
+            if pdh in self.pathmapper.pdh_to_uuid:
+                mounts[targetdir]["uuid"] = self.pathmapper.pdh_to_uuid[pdh]
             if len(sp) == 2:
                 if tp == "Directory":
                     path = sp[1]
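
The hunk above begins recording the source collection's UUID next to its portable data hash whenever the pathmapper knows it, so the mount keeps a reference to the originating collection rather than only its content address. A minimal sketch of the resulting mount entry, with placeholder identifiers and an assumed /keep-style mount target:

    # Placeholder values; the real key is the computed targetdir and the real
    # UUID comes from self.pathmapper.pdh_to_uuid[pdh].
    pdh = "99999999999999999999999999999999+99"          # placeholder portable data hash
    pdh_to_uuid = {pdh: "zzzzz-4zz18-zzzzzzzzzzzzzzz"}    # placeholder collection UUID
    mounts = {"/keep/" + pdh: {"kind": "collection", "portable_data_hash": pdh}}
    if pdh in pdh_to_uuid:
        mounts["/keep/" + pdh]["uuid"] = pdh_to_uuid[pdh]
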
@@ -147,21 +150,28 @@ class ArvadosContainer(JobBase):
                 with Perf(metrics, "createfiles %s" % self.name):
                     for f, p in sorteditems:
                         if not p.target:
-                            pass
-                        elif p.type in ("File", "Directory", "WritableFile", "WritableDirectory"):
+                            continue
+
+                        if p.target.startswith("/"):
+                            dst = p.target[len(self.outdir)+1:] if p.target.startswith(self.outdir+"/") else p.target[1:]
+                        else:
+                            dst = p.target
+
+                        if p.type in ("File", "Directory", "WritableFile", "WritableDirectory"):
                             if p.resolved.startswith("_:"):
-                                vwd.mkdirs(p.target)
+                                vwd.mkdirs(dst)
                             else:
                                 source, path = self.arvrunner.fs_access.get_collection(p.resolved)
-                                vwd.copy(path, p.target, source_collection=source)
+                                vwd.copy(path or ".", dst, source_collection=source)
                         elif p.type == "CreateFile":
                             if self.arvrunner.secret_store.has_secret(p.resolved):
-                                secret_mounts["%s/%s" % (self.outdir, p.target)] = {
+                                mountpoint = p.target if p.target.startswith("/") else os.path.join(self.outdir, p.target)
+                                secret_mounts[mountpoint] = {
                                     "kind": "text",
                                     "content": self.arvrunner.secret_store.retrieve(p.resolved)
                                 }
                             else:
-                                with vwd.open(p.target, "w") as n:
+                                with vwd.open(dst, "w") as n:
                                     n.write(p.resolved)
 
                 def keepemptydirs(p):
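
The dst computation introduced above normalizes a pathmapper target into a path inside the staging collection: absolute targets under the container output directory are rebased onto the collection root, other absolute targets simply drop the leading slash, and relative targets pass through unchanged. A standalone sketch of that rule, with the output directory value assumed for illustration:

    def rel_dst(target, outdir="/var/spool/cwl"):
        # Targets under the container output dir become collection-relative.
        if target.startswith(outdir + "/"):
            return target[len(outdir) + 1:]
        # Other absolute targets just lose the leading slash.
        if target.startswith("/"):
            return target[1:]
        # Relative targets are already collection-relative.
        return target

    # rel_dst("/var/spool/cwl/foo.txt")  -> "foo.txt"
    # rel_dst("/etc/conf.d/app.conf")    -> "etc/conf.d/app.conf"
    # rel_dst("subdir/bar.txt")          -> "subdir/bar.txt"
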
@@ -188,10 +198,14 @@ class ArvadosContainer(JobBase):
                     if (not p.target or self.arvrunner.secret_store.has_secret(p.resolved) or
                         (prev is not None and p.target.startswith(prev))):
                         continue
-                    mountpoint = "%s/%s" % (self.outdir, p.target)
+                    if p.target.startswith("/"):
+                        dst = p.target[len(self.outdir)+1:] if p.target.startswith(self.outdir+"/") else p.target[1:]
+                    else:
+                        dst = p.target
+                    mountpoint = p.target if p.target.startswith("/") else os.path.join(self.outdir, p.target)
                     mounts[mountpoint] = {"kind": "collection",
                                           "portable_data_hash": vwd.portable_data_hash(),
-                                          "path": p.target}
+                                          "path": dst}
                     if p.type.startswith("Writable"):
                         mounts[mountpoint]["writable"] = True
                     prev = p.target + "/"
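
With the change above, each staged entry is mounted at its absolute in-container path while the mount's "path" uses the same collection-relative dst, so the tool sees the file where it expects it and the mount records where it lives inside the staging collection. Roughly, for a writable directory (all values are placeholders):

    # Placeholder hash and paths; vwd.portable_data_hash() supplies the real
    # value for the staging collection built earlier in this method.
    outdir = "/var/spool/cwl"                    # assumed container output directory
    mountpoint = outdir + "/intermediates"       # absolute in-container path
    mounts = {mountpoint: {
        "kind": "collection",
        "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0",
        "path": "intermediates",                 # dst, relative to the staging collection
    }}
    mounts[mountpoint]["writable"] = True        # only when p.type starts with "Writable"
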
@@ -216,13 +230,17 @@ class ArvadosContainer(JobBase):
 
         (docker_req, docker_is_req) = self.get_requirement("DockerRequirement")
         if not docker_req:
-            docker_req = {"dockerImageId": "arvados/jobs"}
+            docker_req = {"dockerImageId": "arvados/jobs:"+__version__}
 
         container_request["container_image"] = arv_docker_get_image(self.arvrunner.api,
                                                                     docker_req,
                                                                     runtimeContext.pull_image,
                                                                     runtimeContext.project_uuid)
 
+        network_req, _ = self.get_requirement("NetworkAccess")
+        if network_req:
+            runtime_constraints["API"] = network_req["networkAccess"]
+
         api_req, _ = self.get_requirement("http://arvados.org/cwl#APIRequirement")
         if api_req:
             runtime_constraints["API"] = True
@@ -254,7 +272,7 @@ class ArvadosContainer(JobBase):
         if self.output_ttl < 0:
             raise WorkflowException("Invalid value %d for output_ttl, cannot be less than zero" % container_request["output_ttl"])
 
-        if self.timelimit is not None:
+        if self.timelimit is not None and self.timelimit > 0:
             scheduling_parameters["max_run_time"] = self.timelimit
 
         extra_submit_params = {}
@@ -270,6 +288,9 @@ class ArvadosContainer(JobBase):
 
         enable_reuse = runtimeContext.enable_reuse
         if enable_reuse:
+            reuse_req, _ = self.get_requirement("WorkReuse")
+            if reuse_req:
+                enable_reuse = reuse_req["enableReuse"]
             reuse_req, _ = self.get_requirement("http://arvados.org/cwl#ReuseRequirement")
             if reuse_req:
                 enable_reuse = reuse_req["enableReuse"]
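
In the reuse hunk above, the standard CWL v1.1 WorkReuse requirement is consulted first and the legacy http://arvados.org/cwl#ReuseRequirement second, so the Arvados extension takes precedence when both are present. A small sketch of that lookup order, with the requirement accessor stubbed out:

    def effective_reuse(get_requirement, default=True):
        # Later matches overwrite earlier ones, so the Arvados extension
        # wins over WorkReuse when a workflow declares both.
        enable_reuse = default
        for name in ("WorkReuse", "http://arvados.org/cwl#ReuseRequirement"):
            req, _ = get_requirement(name)
            if req:
                enable_reuse = req["enableReuse"]
        return enable_reuse

    # e.g. only WorkReuse present, with enableReuse set to false:
    # effective_reuse(lambda n: ({"enableReuse": False}, False) if n == "WorkReuse" else (None, False)) -> False
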
@@ -304,8 +325,9 @@ class ArvadosContainer(JobBase):
                 logger.info("%s reused container %s", self.arvrunner.label(self), response["container_uuid"])
             else:
                 logger.info("%s %s state is %s", self.arvrunner.label(self), response["uuid"], response["state"])
-        except Exception:
-            logger.exception("%s got an error", self.arvrunner.label(self))
+        except Exception as e:
+            logger.exception("%s error submitting container\n%s", self.arvrunner.label(self), e)
+            logger.debug("Container request was %s", container_request)
             self.output_callback({}, "permanentFail")
 
     def done(self, record):
@@ -329,8 +351,8 @@ class ArvadosContainer(JobBase):
             else:
                 processStatus = "permanentFail"
 
-            if processStatus == "permanentFail":
-                logc = arvados.collection.CollectionReader(container["log"],
+            if processStatus == "permanentFail" and record["log_uuid"]:
+                logc = arvados.collection.CollectionReader(record["log_uuid"],
                                                            api_client=self.arvrunner.api,
                                                            keep_client=self.arvrunner.keep_client,
                                                            num_retries=self.arvrunner.num_retries)
@@ -353,8 +375,8 @@ class ArvadosContainer(JobBase):
             if container["output"]:
                 outputs = done.done_outputs(self, container, "/tmp", self.outdir, "/keep")
         except WorkflowException as e:
-            # Only include a stack trace if in debug mode. 
-            # A stack trace may obfuscate more useful output about the workflow. 
+            # Only include a stack trace if in debug mode.
+            # A stack trace may obfuscate more useful output about the workflow.
             logger.error("%s unable to collect output from %s:\n%s",
                          self.arvrunner.label(self), container["output"], e, exc_info=(e if self.arvrunner.debug else False))
             processStatus = "permanentFail"
@@ -416,7 +438,7 @@ class RunnerContainer(Runner):
                 "ram": 1024*1024 * (math.ceil(self.submit_runner_ram) + math.ceil(self.collection_cache_size)),
                 "API": True
             },
-            "use_existing": self.enable_reuse,
+            "use_existing": False, # Never reuse the runner container - see #15497.
             "properties": {}
         }
 
@@ -453,6 +475,7 @@ class RunnerContainer(Runner):
                    "--api=containers",
                    "--no-log-timestamps",
                    "--disable-validate",
+                   "--disable-color",
                    "--eval-timeout=%s" % self.arvrunner.eval_timeout,
                    "--thread-count=%s" % self.arvrunner.thread_count,
                    "--enable-reuse" if self.enable_reuse else "--disable-reuse",
@@ -483,6 +506,9 @@ class RunnerContainer(Runner):
         if self.arvrunner.project_uuid:
             command.append("--project-uuid="+self.arvrunner.project_uuid)
 
+        if self.enable_dev:
+            command.append("--enable-dev")
+
         command.extend([workflowpath, "/var/lib/cwl/cwl.input.json"])
 
         container_req["command"] = command
@@ -525,6 +551,7 @@ class RunnerContainer(Runner):
             container = self.arvrunner.api.containers().get(
                 uuid=record["container_uuid"]
             ).execute(num_retries=self.arvrunner.num_retries)
+            container["log"] = record["log_uuid"]
         except Exception:
             logger.exception("%s while getting runner container", self.arvrunner.label(self))
             self.arvrunner.output_callback({}, "permanentFail")