Fix CollectionCache to pass num_retries to CollectionReader
diff --git a/sdk/cwl/arvados_cwl/arvjob.py b/sdk/cwl/arvados_cwl/arvjob.py
index d104d56e91e7453b5605a0222e17818184a42cb2..2731b2694422fcf8a986057266efe23354830c46 100644
--- a/sdk/cwl/arvados_cwl/arvjob.py
+++ b/sdk/cwl/arvados_cwl/arvjob.py
@@ -32,7 +32,7 @@ from ._version import __version__
 logger = logging.getLogger('arvados.cwl-runner')
 metrics = logging.getLogger('arvados.cwl-runner.metrics')
 
-crunchrunner_re = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.(tmpdir|outdir|keep)\)=(.*)")
+crunchrunner_re = re.compile(r"^.*crunchrunner: \$\(task\.(tmpdir|outdir|keep)\)=(.*)$")
 
 crunchrunner_git_commit = 'a3f2cb186e437bfce0031b024b2157b73ed2717d'
 
@@ -184,17 +184,19 @@ class ArvadosJob(object):
         if self.arvrunner.pipeline:
             self.arvrunner.pipeline["components"][self.name] = {"job": record}
             with Perf(metrics, "update_pipeline_component %s" % self.name):
-                self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(uuid=self.arvrunner.pipeline["uuid"],
-                                                                                 body={
-                                                                                    "components": self.arvrunner.pipeline["components"]
-                                                                                 }).execute(num_retries=self.arvrunner.num_retries)
+                self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(
+                    uuid=self.arvrunner.pipeline["uuid"],
+                    body={
+                        "components": self.arvrunner.pipeline["components"]
+                    }).execute(num_retries=self.arvrunner.num_retries)
         if self.arvrunner.uuid:
             try:
                 job = self.arvrunner.api.jobs().get(uuid=self.arvrunner.uuid).execute()
                 if job:
                     components = job["components"]
                     components[self.name] = record["uuid"]
-                    self.arvrunner.api.jobs().update(uuid=self.arvrunner.uuid,
+                    self.arvrunner.api.jobs().update(
+                        uuid=self.arvrunner.uuid,
                         body={
                             "components": components
                         }).execute(num_retries=self.arvrunner.num_retries)
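
Note: the two update calls above are purely reflowed to one argument per line; both already funnel through .execute(num_retries=self.arvrunner.num_retries), which lets the Arvados API client retry transient failures before raising. The same pattern in isolation, as a sketch (the helper name is mine):

    # Sketch: update() only builds a request object; no I/O happens until
    # .execute() runs, and num_retries retries transient errors (e.g. 5xx
    # responses) before giving up.
    def update_components(arvrunner, components):
        return arvrunner.api.pipeline_instances().update(
            uuid=arvrunner.pipeline["uuid"],
            body={"components": components},
        ).execute(num_retries=arvrunner.num_retries)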
@@ -222,12 +224,13 @@ class ArvadosJob(object):
                                                                    keep_client=self.arvrunner.keep_client,
                                                                    num_retries=self.arvrunner.num_retries)
                         log = logc.open(logc.keys()[0])
-                        dirs = {}
-                        tmpdir = None
-                        outdir = None
-                        keepdir = None
+                        dirs = {
+                            "tmpdir": "/tmpdir",
+                            "outdir": "/outdir",
+                            "keep": "/keep"
+                        }
                         for l in log:
-                            # Determine the tmpdir, outdir and keepdir paths from
+                            # Determine the tmpdir, outdir and keep paths from
                             # the job run.  Unfortunately, we can't take the first
                             # values we find (which are expected to be near the
                             # top) and stop scanning because if the node fails and
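
Note: the four scattered variables (dirs, tmpdir, outdir, keepdir) collapse into a single dirs dict pre-seeded with fallback paths, so any key the log never mentions still resolves to a usable default. The matching step itself continues below this hunk; a minimal reconstruction, assuming the regex groups feed dirs directly and reusing crunchrunner_re from the sketch above:

    log_lines = ["... crunchrunner: $(task.tmpdir)=/tmp/crunch-job/task/tmpdir"]
    dirs = {"tmpdir": "/tmpdir", "outdir": "/outdir", "keep": "/keep"}
    for l in log_lines:
        g = crunchrunner_re.match(l)
        if g:
            # Last match wins: paths logged by a retried task overwrite
            # those from an earlier failed attempt, which is why the loop
            # scans the whole log instead of stopping at the first hit.
            dirs[g.group(1)] = g.group(2)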
@@ -277,7 +280,7 @@ class RunnerJob(Runner):
         if self.tool.tool["id"].startswith("keep:"):
             self.job_order["cwl:tool"] = self.tool.tool["id"][5:]
         else:
-            packed = packed_workflow(self.arvrunner, self.tool)
+            packed = packed_workflow(self.arvrunner, self.tool, self.merged_map)
             wf_pdh = upload_workflow_collection(self.arvrunner, self.name, packed)
             self.job_order["cwl:tool"] = "%s/workflow.cwl#main" % wf_pdh
 
@@ -296,6 +299,9 @@ class RunnerJob(Runner):
         if self.on_error:
             self.job_order["arv:on_error"] = self.on_error
 
+        if kwargs.get("debug"):
+            self.job_order["arv:debug"] = True
+
         return {
             "script": "cwl-runner",
             "script_version": "master",
@@ -324,12 +330,20 @@ class RunnerJob(Runner):
 
         del job_spec["owner_uuid"]
         job_spec["job"] = job
+
+        instance_spec = {
+            "owner_uuid": self.arvrunner.project_uuid,
+            "name": self.name,
+            "components": {
+                "cwl-runner": job_spec,
+            },
+            "state": "RunningOnServer",
+        }
+        if not self.enable_reuse:
+            instance_spec["properties"] = {"run_options": {"enable_job_reuse": False}}
+
         self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().create(
-            body={
-                "owner_uuid": self.arvrunner.project_uuid,
-                "name": self.name,
-                "components": {"cwl-runner": job_spec },
-                "state": "RunningOnServer"}).execute(num_retries=self.arvrunner.num_retries)
+            body=instance_spec).execute(num_retries=self.arvrunner.num_retries)
         logger.info("Created pipeline %s", self.arvrunner.pipeline["uuid"])
 
         if kwargs.get("wait") is False:
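
Note: building instance_spec as a named dict instead of an inline body= literal makes room for the conditional properties key: when job reuse is disabled, run_options.enable_job_reuse = False is recorded on the pipeline instance so the server-side runner honors the no-reuse request. The same construction as a standalone sketch (the function name is mine):

    def build_instance_spec(project_uuid, name, job_spec, enable_reuse):
        spec = {
            "owner_uuid": project_uuid,
            "name": name,
            "components": {"cwl-runner": job_spec},
            "state": "RunningOnServer",
        }
        if not enable_reuse:
            # Stored as a pipeline property rather than a job attribute,
            # where the server-side runner can pick it up.
            spec["properties"] = {"run_options": {"enable_job_reuse": False}}
        return spec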
@@ -356,7 +370,7 @@ class RunnerTemplate(object):
     }
 
     def __init__(self, runner, tool, job_order, enable_reuse, uuid,
-                 submit_runner_ram=0, name=None):
+                 submit_runner_ram=0, name=None, merged_map=None):
         self.runner = runner
         self.tool = tool
         self.job = RunnerJob(
@@ -367,7 +381,8 @@ class RunnerTemplate(object):
             output_name=None,
             output_tags=None,
             submit_runner_ram=submit_runner_ram,
-            name=name)
+            name=name,
+            merged_map=merged_map)
         self.uuid = uuid
 
     def pipeline_component_spec(self):
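
Note: the last two hunks simply thread merged_map from RunnerTemplate down to its embedded RunnerJob, which in turn hands it to packed_workflow() in the -277 hunk above. A hypothetical call showing the new keyword (all arguments here are stand-ins assumed to come from the usual submission path):

    template = RunnerTemplate(runner, tool, job_order,
                              enable_reuse=True, uuid=None,
                              submit_runner_ram=1024, name="my-workflow",
                              merged_map=merged_map)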