# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

from past.builtins import basestring
from builtins import object

import copy
import logging
import re

from cwltool.process import shortname, UnsupportedRequirement
from cwltool.errors import WorkflowException
from cwltool.command_line_tool import revmap_file, CommandLineTool
from cwltool.load_tool import fetch_document
from cwltool.builder import Builder
from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, visit_class
from cwltool.job import JobBase

from schema_salad.sourceline import SourceLine

import arvados_cwl.util
import ruamel.yaml as yaml

import arvados.collection
from arvados.errors import ApiError

from .arvdocker import arv_docker_get_image
from .runner import Runner, arvados_jobs_image, packed_workflow, upload_workflow_collection, trim_anonymous_location, remove_redundant_fields
from .pathmapper import VwdPathMapper, trim_listing
from .perf import Perf
from . import done
from ._version import __version__
from .util import get_intermediate_collection_info

logger = logging.getLogger('arvados.cwl-runner')
metrics = logging.getLogger('arvados.cwl-runner.metrics')
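
# crunchrunner prints log lines of the form
#   crunchrunner: $(task.tmpdir)=<path>
# for tmpdir, outdir and keep. done() scans the job log for these lines
# to recover the directory paths of the run that produced the output.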
crunchrunner_re = re.compile(r"^.*crunchrunner: \$\(task\.(tmpdir|outdir|keep)\)=(.*)$")
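
# Pinned crunchrunner version: used below both as the job's
# minimum_script_version and in the filters that decide whether a
# previously submitted job can be reused.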
crunchrunner_git_commit = 'a3f2cb186e437bfce0031b024b2157b73ed2717d'


class ArvadosJob(JobBase):
    """Submit and manage a Crunch job for executing a CWL CommandLineTool."""

    def __init__(self, runner,
                 builder,           # type: Builder
                 joborder,          # type: Dict[Text, Union[Dict[Text, Any], List, Text]]
                 make_path_mapper,  # type: Callable[..., PathMapper]
                 requirements,      # type: List[Dict[Text, Text]]
                 hints,             # type: List[Dict[Text, Text]]
                 name               # type: Text
    ):
        super(ArvadosJob, self).__init__(builder, joborder, make_path_mapper, requirements, hints, name)
        self.arvrunner = runner
        self.running = False
        self.uuid = None

    def run(self, runtimeContext):
        script_parameters = {
            "command": self.command_line
        }
        runtime_constraints = {}

        with Perf(metrics, "generatefiles %s" % self.name):
            if self.generatefiles["listing"]:
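                # Stage InitialWorkDir entries into a "virtual working
                # directory": literal file contents are written into a new
                # Keep collection, and task.vwd maps target paths to their
                # Keep locations.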
                vwd = arvados.collection.Collection(api_client=self.arvrunner.api,
                                                    keep_client=self.arvrunner.keep_client,
                                                    num_retries=self.arvrunner.num_retries)
                script_parameters["task.vwd"] = {}
                generatemapper = VwdPathMapper(self.generatefiles["listing"], "", "",
                                               separateDirs=False)

                with Perf(metrics, "createfiles %s" % self.name):
                    for f, p in list(generatemapper.items()):
                        if p.type == "CreateFile":
                            with vwd.open(p.target, "w") as n:
                                n.write(p.resolved.encode("utf-8"))

                if vwd:
                    with Perf(metrics, "generatefiles.save_new %s" % self.name):
                        info = get_intermediate_collection_info(self.name, None, runtimeContext.intermediate_output_ttl)
                        vwd.save_new(name=info["name"],
                                     owner_uuid=self.arvrunner.project_uuid,
                                     ensure_unique_name=True,
                                     trash_at=info["trash_at"],
                                     properties=info["properties"])
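
                # Point each target path at its final Keep location: plain
                # files keep their resolved reference, while files created
                # above live in the collection just saved.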
                for f, p in list(generatemapper.items()):
                    if p.type == "File":
                        script_parameters["task.vwd"][p.target] = p.resolved
                    if p.type == "CreateFile":
                        script_parameters["task.vwd"][p.target] = "$(task.keep)/%s/%s" % (vwd.portable_data_hash(), p.target)

        script_parameters["task.env"] = {"TMPDIR": self.tmpdir, "HOME": self.outdir}
        if self.environment:
            script_parameters["task.env"].update(self.environment)

        if self.stdin:
            script_parameters["task.stdin"] = self.stdin

        if self.stdout:
            script_parameters["task.stdout"] = self.stdout

        if self.stderr:
            script_parameters["task.stderr"] = self.stderr

        if self.successCodes:
            script_parameters["task.successCodes"] = self.successCodes
        if self.temporaryFailCodes:
            script_parameters["task.temporaryFailCodes"] = self.temporaryFailCodes
        if self.permanentFailCodes:
            script_parameters["task.permanentFailCodes"] = self.permanentFailCodes

        with Perf(metrics, "arv_docker_get_image %s" % self.name):
            (docker_req, docker_is_req) = self.get_requirement("DockerRequirement")
            if docker_req and runtimeContext.use_container is not False:
                if docker_req.get("dockerOutputDirectory"):
                    raise SourceLine(docker_req, "dockerOutputDirectory", UnsupportedRequirement).makeError(
                        "Option 'dockerOutputDirectory' of DockerRequirement not supported.")
                runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api,
                                                                           docker_req,
                                                                           runtimeContext.pull_image,
                                                                           self.arvrunner.project_uuid)
            else:
                runtime_constraints["docker_image"] = "arvados/jobs"
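
        # Translate the CWL ResourceRequirement (cores, ram, tmpdirSize,
        # outdirSize) into Crunch per-node scheduling constraints.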
        resources = self.builder.resources
        if resources is not None:
            runtime_constraints["min_cores_per_node"] = resources.get("cores", 1)
            runtime_constraints["min_ram_mb_per_node"] = resources.get("ram")
            runtime_constraints["min_scratch_mb_per_node"] = resources.get("tmpdirSize", 0) + resources.get("outdirSize", 0)

        runtime_req, _ = self.get_requirement("http://arvados.org/cwl#RuntimeConstraints")
        if runtime_req:
            if "keep_cache" in runtime_req:
                runtime_constraints["keep_cache_mb_per_task"] = runtime_req["keep_cache"]
                runtime_constraints["min_ram_mb_per_node"] += runtime_req["keep_cache"]
            if "outputDirType" in runtime_req:
                if runtime_req["outputDirType"] == "local_output_dir":
                    script_parameters["task.keepTmpOutput"] = False
                elif runtime_req["outputDirType"] == "keep_output_dir":
                    script_parameters["task.keepTmpOutput"] = True
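
        # Job reuse: these filters are passed to jobs().create() along with
        # find_or_create, so the API server may reuse an existing job only
        # if it ran the same pinned crunchrunner version (and, unless
        # ignore_docker_for_reuse is set, the same Docker image).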
        filters = [["repository", "=", "arvados"],
                   ["script", "=", "crunchrunner"],
                   ["script_version", "in git", crunchrunner_git_commit]]
        if not self.arvrunner.ignore_docker_for_reuse:
            filters.append(["docker_image_locator", "in docker", runtime_constraints["docker_image"]])

        enable_reuse = runtimeContext.enable_reuse
        if enable_reuse:
            reuse_req, _ = self.get_requirement("http://arvados.org/cwl#ReuseRequirement")
            if reuse_req:
                enable_reuse = reuse_req["enableReuse"]

        self.output_callback = self.arvrunner.get_wrapped_callback(self.output_callback)

        try:
            with Perf(metrics, "create %s" % self.name):
                response = self.arvrunner.api.jobs().create(
                    body={
                        "owner_uuid": self.arvrunner.project_uuid,
                        "script": "crunchrunner",
                        "repository": "arvados",
                        "script_version": "master",
                        "minimum_script_version": crunchrunner_git_commit,
                        "script_parameters": {"tasks": [script_parameters]},
                        "runtime_constraints": runtime_constraints
                    },
                    filters=filters,
                    find_or_create=enable_reuse
                ).execute(num_retries=self.arvrunner.num_retries)

            self.uuid = response["uuid"]
            self.arvrunner.process_submitted(self)

            self.update_pipeline_component(response)

            if response["state"] == "Complete":
                logger.info("%s reused job %s", self.arvrunner.label(self), response["uuid"])
                # Give read permission to the desired project on reused jobs
                if response["owner_uuid"] != self.arvrunner.project_uuid:
                    try:
                        self.arvrunner.api.links().create(body={
                            'link_class': 'permission',
                            'name': 'can_read',
                            'tail_uuid': self.arvrunner.project_uuid,
                            'head_uuid': response["uuid"],
                        }).execute(num_retries=self.arvrunner.num_retries)
                    except ApiError as e:
                        # The user might not have "manage" access on the job:
                        # log a message and continue.
                        logger.info("Creating read permission on job %s: %s",
                                    response["uuid"], e)
            else:
                logger.info("%s %s is %s", self.arvrunner.label(self), response["uuid"], response["state"])
        except Exception:
            logger.exception("%s error", self.arvrunner.label(self))
            self.output_callback({}, "permanentFail")

    def update_pipeline_component(self, record):
        with self.arvrunner.workflow_eval_lock:
            if self.arvrunner.pipeline:
                self.arvrunner.pipeline["components"][self.name] = {"job": record}
                with Perf(metrics, "update_pipeline_component %s" % self.name):
                    self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(
                        uuid=self.arvrunner.pipeline["uuid"],
                        body={
                            "components": self.arvrunner.pipeline["components"]
                        }).execute(num_retries=self.arvrunner.num_retries)

            if self.arvrunner.uuid:
                try:
                    job = self.arvrunner.api.jobs().get(uuid=self.arvrunner.uuid).execute()
                    if job:
                        components = job["components"]
                        components[self.name] = record["uuid"]
                        self.arvrunner.api.jobs().update(
                            uuid=self.arvrunner.uuid,
                            body={
                                "components": components
                            }).execute(num_retries=self.arvrunner.num_retries)
                except Exception as e:
                    logger.info("Error adding to components: %s", e)

    def done(self, record):
        try:
            self.update_pipeline_component(record)
        except Exception:
            pass

        try:
            if record["state"] == "Complete":
                processStatus = "success"
            else:
                processStatus = "permanentFail"

            outputs = {}
            try:
                if record["output"]:
                    with Perf(metrics, "inspect log %s" % self.name):
                        logc = arvados.collection.CollectionReader(record["log"],
                                                                   api_client=self.arvrunner.api,
                                                                   keep_client=self.arvrunner.keep_client,
                                                                   num_retries=self.arvrunner.num_retries)
                        log = logc.open(list(logc.keys())[0])
                        dirs = {
                            "tmpdir": "",
                            "outdir": "",
                            "keep": ""
                        }
                        for l in log:
                            # Determine the tmpdir, outdir and keep paths from
                            # the job run. Unfortunately, we can't take the
                            # first values we find (which are expected to be
                            # near the top) and stop scanning, because if the
                            # node fails and the job restarts on a different
                            # node these values will differ between runs, and
                            # we need to know about the final run that
                            # actually produced output.
                            g = crunchrunner_re.match(l)
                            if g:
                                dirs[g.group(1)] = g.group(2)

                    if processStatus == "permanentFail":
                        done.logtail(logc, logger.error, "%s (%s) error log:" % (self.arvrunner.label(self), record["uuid"]), maxlen=40)

                    with Perf(metrics, "output collection %s" % self.name):
                        outputs = done.done(self, record, dirs["tmpdir"],
                                            dirs["outdir"], dirs["keep"])
            except WorkflowException as e:
                logger.error("%s unable to collect output from %s:\n%s",
                             self.arvrunner.label(self), record["output"], e, exc_info=(e if self.arvrunner.debug else False))
                processStatus = "permanentFail"
            except Exception:
                logger.exception("Got unknown exception while collecting output for job %s:", self.name)
                processStatus = "permanentFail"

            # Note: Currently, on error output_callback is expecting an empty
            # dict, anything else will fail.
            if not isinstance(outputs, dict):
                logger.error("Unexpected output type %s '%s'", type(outputs), outputs)
                outputs = {}
                processStatus = "permanentFail"
        finally:
            self.output_callback(outputs, processStatus)


class RunnerJob(Runner):
    """Submit and manage a Crunch job that runs crunch_scripts/cwl-runner."""

    def arvados_job_spec(self, debug=False):
        """Create an Arvados job specification for this workflow.

        The returned dict can be used to create a job (i.e., passed as
        the +body+ argument to jobs().create()), or as a component in
        a pipeline template or pipeline instance.
        """

        if self.embedded_tool.tool["id"].startswith("keep:"):
            self.job_order["cwl:tool"] = self.embedded_tool.tool["id"][5:]
        else:
            packed = packed_workflow(self.arvrunner, self.embedded_tool, self.merged_map)
            wf_pdh = upload_workflow_collection(self.arvrunner, self.name, packed)
            self.job_order["cwl:tool"] = "%s/workflow.cwl#main" % wf_pdh

        adjustDirObjs(self.job_order, trim_listing)
        visit_class(self.job_order, ("File", "Directory"), trim_anonymous_location)
        visit_class(self.job_order, ("File", "Directory"), remove_redundant_fields)

        if self.output_name:
            self.job_order["arv:output_name"] = self.output_name

        if self.output_tags:
            self.job_order["arv:output_tags"] = self.output_tags

        self.job_order["arv:enable_reuse"] = self.enable_reuse

        if self.on_error:
            self.job_order["arv:on_error"] = self.on_error

        if debug:
            self.job_order["arv:debug"] = True

        return {
            "script": "cwl-runner",
            "script_version": "master",
            "minimum_script_version": "570509ab4d2ef93d870fd2b1f2eab178afb1bad9",
            "repository": "arvados",
            "script_parameters": self.job_order,
            "runtime_constraints": {
                "docker_image": arvados_jobs_image(self.arvrunner, self.jobs_image),
                "min_ram_mb_per_node": self.submit_runner_ram
            }
        }

    def run(self, runtimeContext):
        job_spec = self.arvados_job_spec(runtimeContext.debug)

        job_spec.setdefault("owner_uuid", self.arvrunner.project_uuid)

        job = self.arvrunner.api.jobs().create(
            body=job_spec,
            find_or_create=self.enable_reuse
        ).execute(num_retries=self.arvrunner.num_retries)
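
        # Wrap the new job as the "cwl-runner" component of a pipeline
        # instance so the run is visible in Workbench. Falsy and mapping
        # parameter values are rewrapped as {"value": ...}, which is how
        # pipeline components represent them.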
        for k, v in list(job_spec["script_parameters"].items()):
            if v is False or v is None or isinstance(v, dict):
                job_spec["script_parameters"][k] = {"value": v}

        del job_spec["owner_uuid"]
        job_spec["job"] = job

        instance_spec = {
            "owner_uuid": self.arvrunner.project_uuid,
            "name": self.name,
            "components": {
                "cwl-runner": job_spec,
            },
            "state": "RunningOnServer",
        }
        if not self.enable_reuse:
            instance_spec["properties"] = {"run_options": {"enable_job_reuse": False}}

        self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().create(
            body=instance_spec).execute(num_retries=self.arvrunner.num_retries)
        logger.info("Created pipeline %s", self.arvrunner.pipeline["uuid"])

        if runtimeContext.wait is False:
            self.uuid = self.arvrunner.pipeline["uuid"]
            return

        self.uuid = job["uuid"]
        self.arvrunner.process_submitted(self)


class RunnerTemplate(object):
    """An Arvados pipeline template that invokes a CWL workflow."""
    type_to_dataclass = {
        'boolean': 'boolean',
        'File': 'File',
        'Directory': 'Collection',
        'float': 'number',
        'int': 'number',
        'string': 'text',
    }

    def __init__(self, runner, tool, job_order, enable_reuse, uuid,
                 submit_runner_ram=0, name=None, merged_map=None,
                 loadingContext=None):
        self.runner = runner
        self.embedded_tool = tool
        self.job = RunnerJob(
            runner=runner,
            tool=tool,
            enable_reuse=enable_reuse,
            output_name=None,
            output_tags=None,
            submit_runner_ram=submit_runner_ram,
            name=name,
            merged_map=merged_map,
            loadingContext=loadingContext)
        self.job.job_order = job_order
        self.uuid = uuid

    def pipeline_component_spec(self):
        """Return a component that Workbench and a-r-p-i will understand.

        Specifically, translate CWL input specs to Arvados pipeline
        format, like {"dataclass":"File","value":"xyz"}.
        """
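        # For example, a CWL input such as
        #     {"id": "#main/reference", "type": "File"}
        # becomes a script parameter like (values illustrative)
        #     {"dataclass": "File", "required": True, "type": "File",
        #      "value": "<pdh>/reference.fa"}
        # with "value" filled in from the job order when one is given.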
        spec = self.job.arvados_job_spec()

        # Most of the component spec is exactly the same as the job
        # spec (script, script_version, etc.).
        # spec['script_parameters'] isn't right, though. A component
        # spec's script_parameters hash is a translation of
        # self.tool.tool['inputs'] with defaults/overrides taken from
        # the job order. So we move the job parameters out of the way
        # and build a new spec['script_parameters'].
        job_params = spec['script_parameters']
        spec['script_parameters'] = {}

        for param in self.embedded_tool.tool['inputs']:
            param = copy.deepcopy(param)

            # Data type and "required" flag...
            types = param['type']
            if not isinstance(types, list):
                types = [types]
            param['required'] = 'null' not in types
            non_null_types = [t for t in types if t != "null"]
            if len(non_null_types) == 1:
                the_type = non_null_types[0]
                dataclass = None
                if isinstance(the_type, basestring):
                    dataclass = self.type_to_dataclass.get(the_type)
                if dataclass:
                    param['dataclass'] = dataclass
            # Note: If we didn't figure out a single appropriate
            # dataclass, we just leave that attribute out. We leave
            # the "type" attribute there in any case, which might help
            # downstream.

            # Title and description...
            title = param.pop('label', '')
            descr = param.pop('doc', '').rstrip('\n')
            if title:
                param['title'] = title
            if descr:
                param['description'] = descr

            # Fill in the value from the current job order, if any.
            param_id = shortname(param.pop('id'))
            value = job_params.get(param_id)
            if value is None:
                pass
            elif not isinstance(value, dict):
                param['value'] = value
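            # Keep references look like "keep:<pdh>/path"; strip the
            # five-character "keep:" prefix to get the Workbench value.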
            elif param.get('dataclass') in ('File', 'Collection') and value.get('location'):
                param['value'] = value['location'][5:]

            spec['script_parameters'][param_id] = param
        spec['script_parameters']['cwl:tool'] = job_params['cwl:tool']
        return spec

    def save(self):
        body = {
            "components": {
                self.job.name: self.pipeline_component_spec(),
            },
            "name": self.job.name,
        }
        if self.runner.project_uuid:
            body["owner_uuid"] = self.runner.project_uuid
        if self.uuid:
            self.runner.api.pipeline_templates().update(
                uuid=self.uuid, body=body).execute(
                    num_retries=self.runner.num_retries)
            logger.info("Updated template %s", self.uuid)
        else:
            self.uuid = self.runner.api.pipeline_templates().create(
                body=body, ensure_unique_name=True).execute(
                    num_retries=self.runner.num_retries)['uuid']
            logger.info("Created template %s", self.uuid)