19744: Add --enable/disable-usage-report
[arvados.git] / sdk / cwl / arvados_cwl / arvcontainer.py
index 632e171b40220f90e57663bcbc363f25d5b86dac..d4b1e0050dda2a35a2e007bf47bff1c4d4fd1c3f 100644 (file)
@@ -27,6 +27,9 @@ from cwltool.job import JobBase
 
 import arvados.collection
 
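+# crunchstat_summary is used below to generate resource usage reports for completed containers.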
+import crunchstat_summary.summarizer
+import crunchstat_summary.reader
+
 from .arvdocker import arv_docker_get_image
 from . import done
 from .runner import Runner, arvados_jobs_image, packed_workflow, trim_anonymous_location, remove_redundant_fields, make_builder
@@ -367,11 +370,16 @@ class ArvadosContainer(JobBase):
                 logger.warning("%s API revision is %s, revision %s is required to support setting properties on output collections.",
                                self.arvrunner.label(self), self.arvrunner.api._rootDesc["revision"], "20220510")
 
-        ramMultiplier = [1]
+        ram_multiplier = [1]
 
         oom_retry_req, _ = self.get_requirement("http://arvados.org/cwl#OutOfMemoryRetry")
-        if oom_retry_req and oom_retry_req.get('memoryRetryMultipler'):
-            ramMultiplier.append(oom_retry_req.get('memoryRetryMultipler'))
+        if oom_retry_req:
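+            # Prefer 'memoryRetryMultiplier', accepting the misspelled
+            # 'memoryRetryMultipler' for backwards compatibility; default to doubling RAM.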
+            if oom_retry_req.get('memoryRetryMultiplier'):
+                ram_multiplier.append(oom_retry_req.get('memoryRetryMultiplier'))
+            elif oom_retry_req.get('memoryRetryMultipler'):
+                ram_multiplier.append(oom_retry_req.get('memoryRetryMultipler'))
+            else:
+                ram_multiplier.append(2)
 
         if runtimeContext.runnerjob.startswith("arvwf:"):
             wfuuid = runtimeContext.runnerjob[6:runtimeContext.runnerjob.index("#")]
@@ -386,12 +394,14 @@ class ArvadosContainer(JobBase):
         try:
             ram = runtime_constraints["ram"]
 
-            for i in ramMultiplier:
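+            # Track the container request UUID on self so an OOM retry can update the same request.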
+            self.uuid = runtimeContext.submit_request_uuid
+
+            for i in ram_multiplier:
                 runtime_constraints["ram"] = ram * i
 
-                if runtimeContext.submit_request_uuid:
+                if self.uuid:
                     response = self.arvrunner.api.container_requests().update(
-                        uuid=runtimeContext.submit_request_uuid,
+                        uuid=self.uuid,
                         body=container_request,
                         **extra_submit_params
                     ).execute(num_retries=self.arvrunner.num_retries)
@@ -400,22 +410,21 @@ class ArvadosContainer(JobBase):
                         body=container_request,
                         **extra_submit_params
                     ).execute(num_retries=self.arvrunner.num_retries)
-                    runtimeContext.submit_request_uuid = response["uuid"]
+                    self.uuid = response["uuid"]
 
                 if response["container_uuid"] is not None:
                     break
 
             if response["container_uuid"] is None:
-                runtime_constraints["ram"] = ram * ramMultiplier[self.attempt_count]
+                runtime_constraints["ram"] = ram * ram_multiplier[self.attempt_count]
 
             container_request["state"] = "Committed"
             response = self.arvrunner.api.container_requests().update(
-                uuid=runtimeContext.submit_request_uuid,
+                uuid=self.uuid,
                 body=container_request,
                 **extra_submit_params
             ).execute(num_retries=self.arvrunner.num_retries)
 
-            self.uuid = response["uuid"]
             self.arvrunner.process_submitted(self)
             self.attempt_count += 1
 
@@ -449,7 +458,7 @@ class ArvadosContainer(JobBase):
         done.logtail(logc, callback, "", maxlen=1000)
 
         # Check allocation failure
-        oom_matches = oom_retry_req.get('memoryErrorRegex') or r'(bad_alloc|out ?of ?memory|container using over 9.% of memory)'
+        oom_matches = oom_retry_req.get('memoryErrorRegex') or r'(bad_alloc|out ?of ?memory|memory ?error|container using over 9.% of memory)'
         if re.search(oom_matches, loglines[0], re.IGNORECASE | re.MULTILINE):
             return True
 
@@ -458,6 +467,7 @@ class ArvadosContainer(JobBase):
     def done(self, record):
         outputs = {}
         retried = False
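+        # rcode stays None if the container never ran; checked below when tailing logs.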
+        rcode = None
         try:
             container = self.arvrunner.api.containers().get(
                 uuid=record["container_uuid"]
@@ -476,7 +486,7 @@ class ArvadosContainer(JobBase):
                     processStatus = "permanentFail"
 
                 if processStatus == "permanentFail" and self.attempt_count == 1 and self.out_of_memory_retry(record, container):
-                    logger.info("%s Container failed with out of memory error, retrying with more RAM.",
+                    logger.warning("%s Container failed with out of memory error, retrying with more RAM.",
                                  self.arvrunner.label(self))
                     self.job_runtime.submit_request_uuid = None
                     self.uuid = None
@@ -485,20 +495,23 @@ class ArvadosContainer(JobBase):
                     return
 
                 if rcode == 137:
-                    logger.warning("%s Container may have been killed for using too much RAM.  Try resubmitting with a higher 'ramMin'.",
+                    logger.warning("%s Container may have been killed for using too much RAM.  Try resubmitting with a higher 'ramMin' or use the arv:OutOfMemoryRetry feature.",
                                  self.arvrunner.label(self))
             else:
                 processStatus = "permanentFail"
 
-            if processStatus == "permanentFail" and record["log_uuid"]:
-                logc = arvados.collection.CollectionReader(record["log_uuid"],
-                                                           api_client=self.arvrunner.api,
-                                                           keep_client=self.arvrunner.keep_client,
-                                                           num_retries=self.arvrunner.num_retries)
+            logc = None
+            if record["log_uuid"]:
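+                # Open as a writable Collection so the usage report can be saved into the log.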
+                logc = arvados.collection.Collection(record["log_uuid"],
+                                                     api_client=self.arvrunner.api,
+                                                     keep_client=self.arvrunner.keep_client,
+                                                     num_retries=self.arvrunner.num_retries)
+
+            if processStatus == "permanentFail" and logc is not None:
                 label = self.arvrunner.label(self)
                 done.logtail(
                     logc, logger.error,
-                    "%s (%s) error log:" % (label, record["uuid"]), maxlen=40)
+                    "%s (%s) error log:" % (label, record["uuid"]), maxlen=40, include_crunchrun=(rcode is None or rcode > 127))
 
             if record["output_uuid"]:
                 if self.arvrunner.trash_intermediate or self.arvrunner.intermediate_output_ttl:
@@ -520,6 +533,28 @@ class ArvadosContainer(JobBase):
                 uuid=self.uuid,
                 body={"container_request": {"properties": properties}}
             ).execute(num_retries=self.arvrunner.num_retries)
+
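+            # Generate a resource usage report in the log collection unless explicitly disabled.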
+            if logc is not None and self.job_runtime.enable_usage_report is not False:
+                try:
+                    summarizer = crunchstat_summary.summarizer.ContainerRequestSummarizer(
+                        record,
+                        collection_object=logc,
+                        label=self.name,
+                        arv=self.arvrunner.api)
+                    summarizer.run()
+                    with logc.open("usage_report.html", "wt") as mr:
+                        mr.write(summarizer.html_report())
+                    logc.save()
+
+                    # Post warnings about nodes that are under-utilized.
+                    for rc in summarizer._recommend_gen(lambda x: x):
+                        logger.warning(rc)
+
+                except Exception as e:
+                    logger.warning("%s unable to generate resource usage report",
+                                 self.arvrunner.label(self),
+                                 exc_info=(e if self.arvrunner.debug else False))
+
         except WorkflowException as e:
             # Only include a stack trace if in debug mode.
             # A stack trace may obfuscate more useful output about the workflow.
@@ -558,13 +593,19 @@ class RunnerContainer(Runner):
                 }
                 self.job_order[param] = {"$include": mnt}
 
+        container_image = arvados_jobs_image(self.arvrunner, self.jobs_image, runtimeContext)
+
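+        # arv:WorkflowRunnerResources may pin a specific arvados-cwl-runner image for the runner container.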
+        workflow_runner_req, _ = self.embedded_tool.get_requirement("http://arvados.org/cwl#WorkflowRunnerResources")
+        if workflow_runner_req and workflow_runner_req.get("acrContainerImage"):
+            container_image = workflow_runner_req.get("acrContainerImage")
+
         container_req = {
             "name": self.name,
             "output_path": "/var/spool/cwl",
             "cwd": "/var/spool/cwl",
             "priority": self.priority,
             "state": "Committed",
-            "container_image": arvados_jobs_image(self.arvrunner, self.jobs_image, runtimeContext),
+            "container_image": container_image,
             "mounts": {
                 "/var/lib/cwl/cwl.input.json": {
                     "kind": "json",
@@ -585,7 +626,7 @@ class RunnerContainer(Runner):
                 "ram": 1024*1024 * (math.ceil(self.submit_runner_ram) + math.ceil(self.collection_cache_size)),
                 "API": True
             },
-            "use_existing": False, # Never reuse the runner container - see #15497.
+            "use_existing": self.reuse_runner,
             "properties": {}
         }
 
@@ -609,6 +650,8 @@ class RunnerContainer(Runner):
                 "content": packed
             }
             container_req["properties"]["template_uuid"] = self.embedded_tool.tool["id"][6:33]
+        elif self.embedded_tool.tool.get("id", "").startswith("file:"):
+            raise WorkflowException("Tool id '%s' is a local file but a keep: or arvwf: URI was expected" % self.embedded_tool.tool.get("id"))
         else:
             main = self.loadingContext.loader.idx["_:main"]
             if main.get("id") == "_:main":
@@ -689,6 +732,12 @@ class RunnerContainer(Runner):
         if runtimeContext.prefer_cached_downloads:
             command.append("--prefer-cached-downloads")
 
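+        # enable_usage_report is tri-state: None (the default) omits both flags.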
+        if runtimeContext.enable_usage_report is True:
+            command.append("--enable-usage-report")
+
+        if runtimeContext.enable_usage_report is False:
+            command.append("--disable-usage-report")
+
         if self.fast_parser:
             command.append("--fast-parser")
 
@@ -729,14 +778,9 @@ class RunnerContainer(Runner):
 
         logger.info("%s submitted container_request %s", self.arvrunner.label(self), response["uuid"])
 
-        workbench1 = self.arvrunner.api.config()["Services"]["Workbench1"]["ExternalURL"]
         workbench2 = self.arvrunner.api.config()["Services"]["Workbench2"]["ExternalURL"]
-        url = ""
         if workbench2:
             url = "{}processes/{}".format(workbench2, response["uuid"])
-        elif workbench1:
-            url = "{}container_requests/{}".format(workbench1, response["uuid"])
-        if url:
             logger.info("Monitor workflow progress at %s", url)