import logging
import re
import copy

from cwltool.process import get_feature, shortname
from cwltool.errors import WorkflowException, UnsupportedRequirement
from cwltool.draft2tool import revmap_file, CommandLineTool
from cwltool.load_tool import fetch_document
from cwltool.builder import Builder

from schema_salad.sourceline import SourceLine

import arvados.collection

from .arvdocker import arv_docker_get_image
from .runner import Runner, arvados_jobs_image
from .pathmapper import InitialWorkDirPathMapper
from .perf import Perf
from . import done
from ._version import __version__

logger = logging.getLogger('arvados.cwl-runner')
metrics = logging.getLogger('arvados.cwl-runner.metrics')
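
# Matches crunchrunner log lines that report a task's working paths, of the
# form "... stderr ... crunchrunner: $(task.tmpdir)=<path>" (likewise for
# task.outdir and task.keep); group(1) is the variable name, group(2) its path.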
crunchrunner_re = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.(tmpdir|outdir|keep)\)=(.*)")

crunchrunner_git_commit = 'a3f2cb186e437bfce0031b024b2157b73ed2717d'


class ArvadosJob(object):
    """Submit and manage a Crunch job for executing a CWL CommandLineTool."""

    def __init__(self, runner):
        self.arvrunner = runner
        self.running = False
        self.uuid = None
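
    # run() assembles crunchrunner script_parameters and runtime_constraints
    # for this tool invocation, then submits a Crunch job, reusing an
    # existing job when enable_reuse permits.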
    def run(self, dry_run=False, pull_image=True, **kwargs):
        script_parameters = {
            "command": self.command_line
        }
        runtime_constraints = {}
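
        # InitialWorkDirRequirement: write literal ("CreateFile") entries
        # into a fresh Keep collection, then record every staged file's
        # source location in the task.vwd script parameter so crunchrunner
        # can place it in the task's working directory.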
        with Perf(metrics, "generatefiles %s" % self.name):
            if self.generatefiles["listing"]:
                vwd = arvados.collection.Collection(api_client=self.arvrunner.api,
                                                    keep_client=self.arvrunner.keep_client,
                                                    num_retries=self.arvrunner.num_retries)
                script_parameters["task.vwd"] = {}
                generatemapper = InitialWorkDirPathMapper([self.generatefiles], "", "",
                                                          separateDirs=False)

                with Perf(metrics, "createfiles %s" % self.name):
                    for f, p in generatemapper.items():
                        if p.type == "CreateFile":
                            with vwd.open(p.target, "w") as n:
                                n.write(p.resolved.encode("utf-8"))

                with Perf(metrics, "generatefiles.save_new %s" % self.name):
                    vwd.save_new()

                for f, p in generatemapper.items():
                    if p.type == "File":
                        script_parameters["task.vwd"][p.target] = p.resolved
                    if p.type == "CreateFile":
                        script_parameters["task.vwd"][p.target] = "$(task.keep)/%s/%s" % (vwd.portable_data_hash(), p.target)

        script_parameters["task.env"] = {"TMPDIR": self.tmpdir, "HOME": self.outdir}
        if self.environment:
            script_parameters["task.env"].update(self.environment)

        if self.stdin:
            script_parameters["task.stdin"] = self.stdin

        if self.stdout:
            script_parameters["task.stdout"] = self.stdout

        if self.stderr:
            script_parameters["task.stderr"] = self.stderr

        if self.successCodes:
            script_parameters["task.successCodes"] = self.successCodes
        if self.temporaryFailCodes:
            script_parameters["task.temporaryFailCodes"] = self.temporaryFailCodes
        if self.permanentFailCodes:
            script_parameters["task.permanentFailCodes"] = self.permanentFailCodes
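
        # Resolve the Docker image locator for the job. Tools without a
        # DockerRequirement (or with containers disabled) fall back to the
        # stock arvados/jobs image.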
        with Perf(metrics, "arv_docker_get_image %s" % self.name):
            (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
            if docker_req and kwargs.get("use_container") is not False:
                if docker_req.get("dockerOutputDirectory"):
                    raise SourceLine(docker_req, "dockerOutputDirectory", UnsupportedRequirement).makeError(
                        "Option 'dockerOutputDirectory' of DockerRequirement not supported.")
                runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api, docker_req, pull_image, self.arvrunner.project_uuid)
            else:
                runtime_constraints["docker_image"] = arvados_jobs_image(self.arvrunner)

        resources = self.builder.resources
        if resources is not None:
            runtime_constraints["min_cores_per_node"] = resources.get("cores", 1)
            runtime_constraints["min_ram_mb_per_node"] = resources.get("ram")
            runtime_constraints["min_scratch_mb_per_node"] = resources.get("tmpdirSize", 0) + resources.get("outdirSize", 0)

        runtime_req, _ = get_feature(self, "http://arvados.org/cwl#RuntimeConstraints")
        if runtime_req:
            if "keep_cache" in runtime_req:
                runtime_constraints["keep_cache_mb_per_task"] = runtime_req["keep_cache"]
            if "outputDirType" in runtime_req:
                if runtime_req["outputDirType"] == "local_output_dir":
                    script_parameters["task.keepTmpOutput"] = False
                elif runtime_req["outputDirType"] == "keep_output_dir":
                    script_parameters["task.keepTmpOutput"] = True
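
        # Reuse filters: with find_or_create below, the API server only
        # reuses a past job that ran the same crunchrunner script version
        # and (unless ignore_docker_for_reuse is set) the same Docker image.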
        filters = [["repository", "=", "arvados"],
                   ["script", "=", "crunchrunner"],
                   ["script_version", "in git", crunchrunner_git_commit]]
        if not self.arvrunner.ignore_docker_for_reuse:
            filters.append(["docker_image_locator", "in docker", runtime_constraints["docker_image"]])

        try:
            with Perf(metrics, "create %s" % self.name):
                response = self.arvrunner.api.jobs().create(
                    body={
                        "owner_uuid": self.arvrunner.project_uuid,
                        "script": "crunchrunner",
                        "repository": "arvados",
                        "script_version": "master",
                        "minimum_script_version": crunchrunner_git_commit,
                        "script_parameters": {"tasks": [script_parameters]},
                        "runtime_constraints": runtime_constraints
                    },
                    filters=filters,
                    find_or_create=kwargs.get("enable_reuse", True)
                ).execute(num_retries=self.arvrunner.num_retries)

            self.arvrunner.processes[response["uuid"]] = self

            self.update_pipeline_component(response)

            logger.info("%s %s is %s", self.arvrunner.label(self), response["uuid"], response["state"])

            if response["state"] in ("Complete", "Failed", "Cancelled"):
                with Perf(metrics, "done %s" % self.name):
                    self.done(response)
        except Exception as e:
            logger.exception("%s error", self.arvrunner.label(self))
            self.output_callback({}, "permanentFail")

    def update_pipeline_component(self, record):
        if self.arvrunner.pipeline:
            self.arvrunner.pipeline["components"][self.name] = {"job": record}
            with Perf(metrics, "update_pipeline_component %s" % self.name):
                self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(
                    uuid=self.arvrunner.pipeline["uuid"],
                    body={
                        "components": self.arvrunner.pipeline["components"]
                    }).execute(num_retries=self.arvrunner.num_retries)

        if self.arvrunner.uuid:
            try:
                job = self.arvrunner.api.jobs().get(uuid=self.arvrunner.uuid).execute()
                if job:
                    components = job["components"]
                    components[self.name] = record["uuid"]
                    self.arvrunner.api.jobs().update(
                        uuid=self.arvrunner.uuid,
                        body={
                            "components": components
                        }).execute(num_retries=self.arvrunner.num_retries)
            except Exception as e:
                logger.info("Error adding to components: %s", e)
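
    # Called when the job reaches a final state: scan the log for the task's
    # working paths, collect outputs from Keep, and report final status via
    # output_callback.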
    def done(self, record):
        try:
            self.update_pipeline_component(record)
        except Exception:
            pass

        try:
            if record["state"] == "Complete":
                processStatus = "success"
            else:
                processStatus = "permanentFail"

            outputs = {}
            try:
                if record["output"]:
                    with Perf(metrics, "inspect log %s" % self.name):
                        logc = arvados.collection.CollectionReader(record["log"],
                                                                   api_client=self.arvrunner.api,
                                                                   keep_client=self.arvrunner.keep_client,
                                                                   num_retries=self.arvrunner.num_retries)
                        log = logc.open(logc.keys()[0])
                        dirs = {}
                        for l in log:
                            # Determine the tmpdir, outdir and keepdir paths
                            # from the job run. Unfortunately, we can't take
                            # the first values we find (which are expected to
                            # be near the top) and stop scanning, because if
                            # the node fails and the job restarts on a
                            # different node these values will differ between
                            # runs, and we need the values from the final run
                            # that actually produced output.
                            g = crunchrunner_re.match(l)
                            if g:
                                dirs[g.group(1)] = g.group(2)

                    if processStatus == "permanentFail":
                        done.logtail(logc, logger, "%s error log:" % self.arvrunner.label(self))

                    with Perf(metrics, "output collection %s" % self.name):
                        outputs = done.done(self, record, dirs["tmpdir"],
                                            dirs["outdir"], dirs["keep"])
            except WorkflowException as e:
                logger.error("%s unable to collect output from %s:\n%s",
                             self.arvrunner.label(self), record["output"], e,
                             exc_info=(e if self.arvrunner.debug else False))
                processStatus = "permanentFail"
            except Exception as e:
                logger.exception("Got unknown exception while collecting output for job %s:", self.name)
                processStatus = "permanentFail"

            # Note: Currently, on error output_callback expects an empty
            # dict; anything else will fail.
            if not isinstance(outputs, dict):
                logger.error("Unexpected output type %s '%s'", type(outputs), outputs)
                outputs = {}
                processStatus = "permanentFail"
        finally:
            self.output_callback(outputs, processStatus)
            if record["uuid"] in self.arvrunner.processes:
                del self.arvrunner.processes[record["uuid"]]


class RunnerJob(Runner):
    """Submit and manage a Crunch job that runs crunch_scripts/cwl-runner."""

    def arvados_job_spec(self, dry_run=False, pull_image=True, **kwargs):
        """Create an Arvados job specification for this workflow.

        The returned dict can be used to create a job (i.e., passed as
        the +body+ argument to jobs().create()), or as a component in
        a pipeline template or pipeline instance.
        """

        workflowmapper = super(RunnerJob, self).arvados_job_spec(dry_run=dry_run, pull_image=pull_image, **kwargs)
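
        # The workflow document was uploaded to Keep by the superclass; the
        # mapped target is a "keep:..." reference, so [5:] strips the
        # "keep:" prefix to get the collection-relative path.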
        self.job_order["cwl:tool"] = workflowmapper.mapper(self.tool.tool["id"]).target[5:]

        if self.output_name:
            self.job_order["arv:output_name"] = self.output_name

        if self.output_tags:
            self.job_order["arv:output_tags"] = self.output_tags

        self.job_order["arv:enable_reuse"] = self.enable_reuse

        if self.on_error:
            self.job_order["arv:on_error"] = self.on_error
258 "script": "cwl-runner",
259 "script_version": "master",
260 "minimum_script_version": "570509ab4d2ef93d870fd2b1f2eab178afb1bad9",
261 "repository": "arvados",
262 "script_parameters": self.job_order,
263 "runtime_constraints": {
264 "docker_image": arvados_jobs_image(self.arvrunner),
265 "min_ram_mb_per_node": self.submit_runner_ram
    def run(self, *args, **kwargs):
        job_spec = self.arvados_job_spec(*args, **kwargs)

        job_spec.setdefault("owner_uuid", self.arvrunner.project_uuid)

        job = self.arvrunner.api.jobs().create(
            body=job_spec,
            find_or_create=self.enable_reuse
        ).execute(num_retries=self.arvrunner.num_retries)
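
        # In a pipeline component, a bare dict (and False/None) in
        # script_parameters would be read as a parameter spec rather than a
        # literal value, so wrap those values as {"value": v}.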
        for k, v in job_spec["script_parameters"].items():
            if v is False or v is None or isinstance(v, dict):
                job_spec["script_parameters"][k] = {"value": v}

        del job_spec["owner_uuid"]
        job_spec["job"] = job
        self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().create(
            body={
                "owner_uuid": self.arvrunner.project_uuid,
                "name": self.name,
                "components": {"cwl-runner": job_spec},
                "state": "RunningOnServer"}).execute(num_retries=self.arvrunner.num_retries)
        logger.info("Created pipeline %s", self.arvrunner.pipeline["uuid"])

        if kwargs.get("wait") is False:
            self.uuid = self.arvrunner.pipeline["uuid"]
            return

        self.uuid = job["uuid"]
        self.arvrunner.processes[self.uuid] = self

        if job["state"] in ("Complete", "Failed", "Cancelled"):
            self.done(job)


class RunnerTemplate(object):
    """An Arvados pipeline template that invokes a CWL workflow."""
    type_to_dataclass = {
        'boolean': 'boolean',
        'File': 'File',
        'Directory': 'Collection',
        'float': 'number',
        'int': 'number',
        'string': 'text',
    }

    def __init__(self, runner, tool, job_order, enable_reuse, uuid,
                 submit_runner_ram=0, name=None):
        self.runner = runner
        self.tool = tool
        self.job = RunnerJob(
            runner=runner,
            tool=tool,
            job_order=job_order,
            enable_reuse=enable_reuse,
            output_name=None,
            output_tags=None,
            submit_runner_ram=submit_runner_ram,
            name=name)
        self.uuid = uuid

    def pipeline_component_spec(self):
        """Return a component that Workbench and a-r-p-i will understand.

        Specifically, translate CWL input specs to Arvados pipeline
        format, like {"dataclass":"File","value":"xyz"}.
        """
        spec = self.job.arvados_job_spec()

        # Most of the component spec is exactly the same as the job
        # spec (script, script_version, etc.).
        # spec['script_parameters'] isn't right, though. A component
        # spec's script_parameters hash is a translation of
        # self.tool.tool['inputs'] with defaults/overrides taken from
        # the job order. So we move the job parameters out of the way
        # and build a new spec['script_parameters'].
        job_params = spec['script_parameters']
        spec['script_parameters'] = {}

        for param in self.tool.tool['inputs']:
            param = copy.deepcopy(param)

            # Data type and "required" flag...
            types = param['type']
            if not isinstance(types, list):
                types = [types]
            param['required'] = 'null' not in types
            non_null_types = set(types) - set(['null'])
            if len(non_null_types) == 1:
                the_type = [c for c in non_null_types][0]
                dataclass = self.type_to_dataclass.get(the_type)
                if dataclass:
                    param['dataclass'] = dataclass
            # Note: If we don't figure out a single appropriate
            # dataclass, we just leave that attribute out. We leave
            # the "type" attribute there in any case, which might help
            # downstream.

            # Title and description...
            title = param.pop('label', '')
            descr = param.pop('doc', '').rstrip('\n')
            if title:
                param['title'] = title
            if descr:
                param['description'] = descr

            # Fill in the value from the current job order, if any.
            param_id = shortname(param.pop('id'))
            value = job_params.get(param_id)
            if value is None:
                pass
            elif not isinstance(value, dict):
                param['value'] = value
            elif param.get('dataclass') in ('File', 'Collection') and value.get('location'):
                param['value'] = value['location'][5:]

            spec['script_parameters'][param_id] = param
        spec['script_parameters']['cwl:tool'] = job_params['cwl:tool']
        return spec
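
    # Create the pipeline template, or update it in place when a template
    # UUID was supplied.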
    def save(self):
        body = {
            "components": {
                self.job.name: self.pipeline_component_spec(),
            },
            "name": self.job.name,
        }
        if self.runner.project_uuid:
            body["owner_uuid"] = self.runner.project_uuid
        if self.uuid:
            self.runner.api.pipeline_templates().update(
                uuid=self.uuid, body=body).execute(
                    num_retries=self.runner.num_retries)
            logger.info("Updated template %s", self.uuid)
        else:
            self.uuid = self.runner.api.pipeline_templates().create(
                body=body, ensure_unique_name=True).execute(
                    num_retries=self.runner.num_retries)['uuid']
            logger.info("Created template %s", self.uuid)