import logging
import re
from . import done
from .arvdocker import arv_docker_get_image
from cwltool.process import get_feature
from cwltool.errors import WorkflowException
import arvados.collection

logger = logging.getLogger('arvados.cwl-runner')

tmpdirre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.tmpdir\)=(.*)")
outdirre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.outdir\)=(.*)")
keepre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.keep\)=(.*)")
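
# Example of a stderr log line these patterns are intended to match
# (hypothetical values; the field layout is inferred from the regexps above,
# not taken from a real crunch log):
#
#   2016-03-14_17:25:08 zzzzz-8i9sb-0123456789abcde 27898 0 stderr node1 task0 crunchrunner: $(task.tmpdir)=/tmp/crunch-job-task-work/tmpdir
#
# tmpdirre.match(line).group(1) would then yield
# "/tmp/crunch-job-task-work/tmpdir".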

class ArvadosJob(object):
    """Submit and manage a Crunch job for executing a CWL CommandLineTool."""

    def __init__(self, runner):
        self.arvrunner = runner
        self.running = False

    def run(self, dry_run=False, pull_image=True, **kwargs):
        script_parameters = {
            "command": self.command_line
        }
        runtime_constraints = {}

        if self.generatefiles:
            # Stage generated files into a new "virtual working directory"
            # collection in Keep, then point the task.vwd entries at it.
            vwd = arvados.collection.Collection()
            script_parameters["task.vwd"] = {}
            for t in self.generatefiles:
                if isinstance(self.generatefiles[t], dict):
                    # An existing file: copy it into the staging collection.
                    src, rest = self.arvrunner.fs_access.get_collection(self.generatefiles[t]["path"].replace("$(task.keep)/", "keep:"))
                    vwd.copy(rest, t, source_collection=src)
                else:
                    # A literal: write its contents into the staging collection.
                    with vwd.open(t, "w") as f:
                        f.write(self.generatefiles[t])
            vwd.save_new()
            for t in self.generatefiles:
                script_parameters["task.vwd"][t] = "$(task.keep)/%s/%s" % (vwd.portable_data_hash(), t)
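            # Illustrative result: a literal entry self.generatefiles["in.txt"]
            # would end up referenced as
            #   script_parameters["task.vwd"]["in.txt"] == "$(task.keep)/<portable_data_hash>/in.txt"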

        script_parameters["task.env"] = {"TMPDIR": "$(task.tmpdir)"}
        if self.environment:
            script_parameters["task.env"].update(self.environment)

        if self.stdin:
            script_parameters["task.stdin"] = self.pathmapper.mapper(self.stdin)[1]

        if self.stdout:
            script_parameters["task.stdout"] = self.stdout

        (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
        if docker_req and kwargs.get("use_container") is not False:
            runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api, docker_req, pull_image, self.arvrunner.project_uuid)
        else:
            runtime_constraints["docker_image"] = "arvados/jobs"

        resources = self.builder.resources
        if resources is not None:
            runtime_constraints["min_cores_per_node"] = resources.get("cores", 1)
            runtime_constraints["min_ram_mb_per_node"] = resources.get("ram")
            runtime_constraints["min_scratch_mb_per_node"] = resources.get("tmpdirSize", 0) + resources.get("outdirSize", 0)
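        # Illustrative mapping: CWL resources of {"cores": 2, "ram": 4096,
        # "tmpdirSize": 1024, "outdirSize": 2048} become min_cores_per_node=2,
        # min_ram_mb_per_node=4096 and min_scratch_mb_per_node=3072.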

        filters = [["repository", "=", "arvados"],
                   ["script", "=", "crunchrunner"],
                   ["script_version", "in git", "9e5b98e8f5f4727856b53447191f9c06e3da2ba6"]]
        if not self.arvrunner.ignore_docker_for_reuse:
            filters.append(["docker_image_locator", "in docker", runtime_constraints["docker_image"]])
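        # These filters, together with find_or_create below, tell the API
        # server which existing jobs are acceptable substitutes for reuse: the
        # same crunchrunner script from the arvados repository, a
        # script_version whose history includes the pinned commit, and
        # (unless reuse across images is requested) an equivalent Docker image.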

        try:
            response = self.arvrunner.api.jobs().create(
                body={
                    "owner_uuid": self.arvrunner.project_uuid,
                    "script": "crunchrunner",
                    "repository": "arvados",
                    "script_version": "master",
                    "minimum_script_version": "9e5b98e8f5f4727856b53447191f9c06e3da2ba6",
                    "script_parameters": {"tasks": [script_parameters]},
                    "runtime_constraints": runtime_constraints
                },
                filters=filters,
                find_or_create=kwargs.get("enable_reuse", True)
            ).execute(num_retries=self.arvrunner.num_retries)

            self.arvrunner.jobs[response["uuid"]] = self

            self.update_pipeline_component(response)

            logger.info("Job %s (%s) is %s", self.name, response["uuid"], response["state"])

            if response["state"] in ("Complete", "Failed", "Cancelled"):
                self.done(response)
        except Exception as e:
            logger.error("Got error %s", e)
            self.output_callback({}, "permanentFail")

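    # Illustrative shape of the bookkeeping below (hypothetical values): after
    # submitting a step named "sort", the enclosing pipeline instance records
    #   {"components": {"sort": {"job": {"uuid": "zzzzz-8i9sb-...", "state": "Queued"}}}}
    # while a runner job's components map the step name to the job UUID.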
    def update_pipeline_component(self, record):
        if self.arvrunner.pipeline:
            self.arvrunner.pipeline["components"][self.name] = {"job": record}
            self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(
                uuid=self.arvrunner.pipeline["uuid"],
                body={"components": self.arvrunner.pipeline["components"]}
            ).execute(num_retries=self.arvrunner.num_retries)
        if self.arvrunner.uuid:
            try:
                job = self.arvrunner.api.jobs().get(uuid=self.arvrunner.uuid).execute()
                if job:
                    components = job["components"]
                    components[self.name] = record["uuid"]
                    self.arvrunner.api.jobs().update(
                        uuid=self.arvrunner.uuid,
                        body={"components": components}
                    ).execute(num_retries=self.arvrunner.num_retries)
            except Exception as e:
                logger.info("Error adding to components: %s", e)

    def done(self, record):
        try:
            self.update_pipeline_component(record)
        except Exception:
            # Component bookkeeping is best-effort; a failure here shouldn't
            # change the reported job outcome.
            pass

        try:
            if record["state"] == "Complete":
                processStatus = "success"
            else:
                processStatus = "permanentFail"

            outputs = {}
            try:
                if record["output"]:
                    logc = arvados.collection.Collection(record["log"])
                    log = logc.open(logc.keys()[0])
                    tmpdir = None
                    outdir = None
                    keepdir = None
                    for line in log:
                        # Determine the tmpdir, outdir and keepdir paths from
                        # the job run.  Unfortunately, we can't take the first
                        # values we find (which are expected to be near the
                        # top) and stop scanning, because if the node fails and
                        # the job restarts on a different node, these values
                        # will differ between runs, and we need the values from
                        # the final run that actually produced output.

                        g = tmpdirre.match(line)
                        if g:
                            tmpdir = g.group(1)
                        g = outdirre.match(line)
                        if g:
                            outdir = g.group(1)
                        g = keepre.match(line)
                        if g:
                            keepdir = g.group(1)

                    outputs = done.done(self, record, tmpdir, outdir, keepdir)
            except WorkflowException as e:
                logger.error("Error while collecting job outputs:\n%s", e, exc_info=(e if self.arvrunner.debug else False))
                processStatus = "permanentFail"
            except Exception:
                logger.exception("Got unknown exception while collecting job outputs:")
                processStatus = "permanentFail"

            self.output_callback(outputs, processStatus)
        finally:
            del self.arvrunner.jobs[record["uuid"]]
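
# Hypothetical usage sketch (not part of this module): the arvados-cwl-runner
# machinery constructs one ArvadosJob per CommandLineTool step and fills in
# the attributes used above (command_line, generatefiles, environment, stdin,
# stdout, builder, pathmapper, name, output_callback) before calling run();
# when the job is seen to reach a final state, done(record) collects the
# outputs and reports them through output_callback.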