import logging
import re
import copy

from cwltool.process import get_feature, shortname
from cwltool.errors import WorkflowException
from cwltool.draft2tool import revmap_file, CommandLineTool
from cwltool.load_tool import fetch_document
from cwltool.builder import Builder

import arvados.collection

from .arvdocker import arv_docker_get_image
from .runner import Runner, arvados_jobs_image
from .pathmapper import InitialWorkDirPathMapper
from .perf import Perf
from . import done
from ._version import __version__

logger = logging.getLogger('arvados.cwl-runner')
metrics = logging.getLogger('arvados.cwl-runner.metrics')
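
# crunchrunner prints the task's tmpdir, outdir, and keep mount paths to
# stderr; these patterns recover them from the job log (see ArvadosJob.done).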
tmpdirre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.tmpdir\)=(.*)")
outdirre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.outdir\)=(.*)")
keepre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.keep\)=(.*)")


class ArvadosJob(object):
    """Submit and manage a Crunch job for executing a CWL CommandLineTool."""

    def __init__(self, runner):
        self.arvrunner = runner

    def run(self, dry_run=False, pull_image=True, **kwargs):
        script_parameters = {
            "command": self.command_line
        }
        runtime_constraints = {}
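
        # Stage any InitialWorkDirRequirement entries into a "virtual
        # working directory": literal file contents are written into a new
        # Keep collection, and task.vwd maps each target path to its source.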
        with Perf(metrics, "generatefiles %s" % self.name):
            if self.generatefiles["listing"]:
                vwd = arvados.collection.Collection(api_client=self.arvrunner.api,
                                                    keep_client=self.arvrunner.keep_client,
                                                    num_retries=self.arvrunner.num_retries)
                script_parameters["task.vwd"] = {}
                generatemapper = InitialWorkDirPathMapper([self.generatefiles], "", "",
                                                          separateDirs=False)

                with Perf(metrics, "createfiles %s" % self.name):
                    for f, p in generatemapper.items():
                        if p.type == "CreateFile":
                            with vwd.open(p.target, "w") as n:
                                n.write(p.resolved.encode("utf-8"))

                with Perf(metrics, "generatefiles.save_new %s" % self.name):
                    vwd.save_new()

                for f, p in generatemapper.items():
                    if p.type == "File":
                        script_parameters["task.vwd"][p.target] = p.resolved
                    if p.type == "CreateFile":
                        script_parameters["task.vwd"][p.target] = "$(task.keep)/%s/%s" % (vwd.portable_data_hash(), p.target)

        script_parameters["task.env"] = {"TMPDIR": self.tmpdir, "HOME": self.outdir}
        if self.environment:
            script_parameters["task.env"].update(self.environment)

        if self.stdin:
            script_parameters["task.stdin"] = self.stdin

        if self.stdout:
            script_parameters["task.stdout"] = self.stdout

        if self.stderr:
            script_parameters["task.stderr"] = self.stderr

        if self.successCodes:
            script_parameters["task.successCodes"] = self.successCodes
        if self.temporaryFailCodes:
            script_parameters["task.temporaryFailCodes"] = self.temporaryFailCodes
        if self.permanentFailCodes:
            script_parameters["task.permanentFailCodes"] = self.permanentFailCodes
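
        # Resolve the tool's DockerRequirement to an image stored in Keep
        # (fetching it if necessary); otherwise fall back to the default
        # arvados/jobs image.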
        with Perf(metrics, "arv_docker_get_image %s" % self.name):
            (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
            if docker_req and kwargs.get("use_container") is not False:
                runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api, docker_req, pull_image, self.arvrunner.project_uuid)
            else:
                runtime_constraints["docker_image"] = arvados_jobs_image(self.arvrunner)
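
        # Translate the CWL ResourceRequirement into Crunch node constraints.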
        resources = self.builder.resources
        if resources is not None:
            runtime_constraints["min_cores_per_node"] = resources.get("cores", 1)
            runtime_constraints["min_ram_mb_per_node"] = resources.get("ram")
            runtime_constraints["min_scratch_mb_per_node"] = resources.get("tmpdirSize", 0) + resources.get("outdirSize", 0)
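
        # Apply the Arvados-specific RuntimeConstraints extension, which can
        # tune the Keep cache size and choose where the output dir is staged.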
        runtime_req, _ = get_feature(self, "http://arvados.org/cwl#RuntimeConstraints")
        if runtime_req:
            if "keep_cache" in runtime_req:
                runtime_constraints["keep_cache_mb_per_task"] = runtime_req["keep_cache"]
            if "outputDirType" in runtime_req:
                if runtime_req["outputDirType"] == "local_output_dir":
                    script_parameters["task.keepTmpOutput"] = False
                elif runtime_req["outputDirType"] == "keep_output_dir":
                    script_parameters["task.keepTmpOutput"] = True
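
        # These filters determine which existing jobs the API server may
        # return when find_or_create requests job reuse.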
        filters = [["repository", "=", "arvados"],
                   ["script", "=", "crunchrunner"],
                   ["script_version", "in git", "9e5b98e8f5f4727856b53447191f9c06e3da2ba6"]]
        if not self.arvrunner.ignore_docker_for_reuse:
            filters.append(["docker_image_locator", "in docker", runtime_constraints["docker_image"]])

        try:
            with Perf(metrics, "create %s" % self.name):
                response = self.arvrunner.api.jobs().create(
                    body={
                        "owner_uuid": self.arvrunner.project_uuid,
                        "script": "crunchrunner",
                        "repository": "arvados",
                        "script_version": "master",
                        "minimum_script_version": "9e5b98e8f5f4727856b53447191f9c06e3da2ba6",
                        "script_parameters": {"tasks": [script_parameters]},
                        "runtime_constraints": runtime_constraints
                    },
                    filters=filters,
                    find_or_create=kwargs.get("enable_reuse", True)
                ).execute(num_retries=self.arvrunner.num_retries)

            self.arvrunner.processes[response["uuid"]] = self

            self.update_pipeline_component(response)

            logger.info("Job %s (%s) is %s", self.name, response["uuid"], response["state"])

            if response["state"] in ("Complete", "Failed", "Cancelled"):
                with Perf(metrics, "done %s" % self.name):
                    self.done(response)
        except Exception as e:
            logger.error("Got error %s" % str(e))
            self.output_callback({}, "permanentFail")

    def update_pipeline_component(self, record):
        if self.arvrunner.pipeline:
            self.arvrunner.pipeline["components"][self.name] = {"job": record}
            with Perf(metrics, "update_pipeline_component %s" % self.name):
                self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(uuid=self.arvrunner.pipeline["uuid"],
                                                                                         body={
                                                                                             "components": self.arvrunner.pipeline["components"]
                                                                                         }).execute(num_retries=self.arvrunner.num_retries)
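
        # When the runner itself is a Crunch job, also record the new job
        # in the runner job's components hash.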
        if self.arvrunner.uuid:
            try:
                job = self.arvrunner.api.jobs().get(uuid=self.arvrunner.uuid).execute()
                if job:
                    components = job["components"]
                    components[self.name] = record["uuid"]
                    self.arvrunner.api.jobs().update(uuid=self.arvrunner.uuid,
                                                     body={
                                                         "components": components
                                                     }).execute(num_retries=self.arvrunner.num_retries)
            except Exception as e:
                logger.info("Error adding to components: %s", e)

    def done(self, record):
        try:
            self.update_pipeline_component(record)
        except:
            pass

        try:
            if record["state"] == "Complete":
                processStatus = "success"
            else:
                processStatus = "permanentFail"

            outputs = {}
            try:
                if record["output"]:
                    with Perf(metrics, "inspect log %s" % self.name):
                        logc = arvados.collection.CollectionReader(record["log"],
                                                                   api_client=self.arvrunner.api,
                                                                   keep_client=self.arvrunner.keep_client,
                                                                   num_retries=self.arvrunner.num_retries)
                        log = logc.open(logc.keys()[0])
                        tmpdir = None
                        outdir = None
                        keepdir = None
                        for l in log:
                            # Determine the tmpdir, outdir and keepdir paths from
                            # the job run. Unfortunately, we can't take the first
                            # values we find (which are expected to be near the
                            # top) and stop scanning, because if the node fails
                            # and the job restarts on a different node these
                            # values will differ between runs, and we need the
                            # final run that actually produced output.
                            g = tmpdirre.match(l)
                            if g:
                                tmpdir = g.group(1)
                            g = outdirre.match(l)
                            if g:
                                outdir = g.group(1)
                            g = keepre.match(l)
                            if g:
                                keepdir = g.group(1)

                    with Perf(metrics, "output collection %s" % self.name):
                        outputs = done.done(self, record, tmpdir, outdir, keepdir)
            except WorkflowException as e:
                logger.error("Error while collecting job outputs:\n%s", e, exc_info=(e if self.arvrunner.debug else False))
                processStatus = "permanentFail"
                outputs = None
            except Exception as e:
                logger.exception("Got unknown exception while collecting job outputs:")
                processStatus = "permanentFail"
                outputs = None

            self.output_callback(outputs, processStatus)
        finally:
            del self.arvrunner.processes[record["uuid"]]


class RunnerJob(Runner):
    """Submit and manage a Crunch job that runs crunch_scripts/cwl-runner."""

    def arvados_job_spec(self, dry_run=False, pull_image=True, **kwargs):
        """Create an Arvados job specification for this workflow.

        The returned dict can be used to create a job (i.e., passed as
        the +body+ argument to jobs().create()), or as a component in
        a pipeline template or pipeline instance.
        """

        workflowmapper = super(RunnerJob, self).arvados_job_spec(dry_run=dry_run, pull_image=pull_image, **kwargs)

        # Need to filter this out, since it gets added by cwltool when
        # parameters are provided on the command line, and
        # arv-run-pipeline-instance doesn't like it.
        if "job_order" in self.job_order:
            del self.job_order["job_order"]
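
        # The mapped target is a "keep:..." URI; strip the five-character
        # scheme prefix to get the collection-relative path.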
        self.job_order["cwl:tool"] = workflowmapper.mapper(self.tool.tool["id"]).target[5:]

        if self.output_name:
            self.job_order["arv:output_name"] = self.output_name

        return {
            "script": "cwl-runner",
            "script_version": __version__,
            "repository": "arvados",
            "script_parameters": self.job_order,
            "runtime_constraints": {
                "docker_image": arvados_jobs_image(self.arvrunner)
            }
        }

    def run(self, *args, **kwargs):
        job_spec = self.arvados_job_spec(*args, **kwargs)

        job_spec.setdefault("owner_uuid", self.arvrunner.project_uuid)

        job = self.arvrunner.api.jobs().create(
            body=job_spec,
            find_or_create=self.enable_reuse
        ).execute(num_retries=self.arvrunner.num_retries)
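
        # Wrap False, None, and dict values as {"value": v} so the pipeline
        # component records them as explicit parameter values rather than
        # treating them as unset or as parameter specs.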
        for k, v in job_spec["script_parameters"].items():
            if v is False or v is None or isinstance(v, dict):
                job_spec["script_parameters"][k] = {"value": v}

        del job_spec["owner_uuid"]
        job_spec["job"] = job
        self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().create(
            body={
                "owner_uuid": self.arvrunner.project_uuid,
                "name": shortname(self.tool.tool["id"]),
                "components": {"cwl-runner": job_spec},
                "state": "RunningOnServer"}).execute(num_retries=self.arvrunner.num_retries)
        logger.info("Created pipeline %s", self.arvrunner.pipeline["uuid"])

        if kwargs.get("wait") is False:
            self.uuid = self.arvrunner.pipeline["uuid"]
            return

        self.uuid = job["uuid"]
        self.arvrunner.processes[self.uuid] = self

        if job["state"] in ("Complete", "Failed", "Cancelled"):
            self.done(job)


class RunnerTemplate(object):
    """An Arvados pipeline template that invokes a CWL workflow."""
    type_to_dataclass = {
        'boolean': 'boolean',
        'File': 'File',
        'Directory': 'Collection',
        'float': 'number',
        'int': 'number',
        'string': 'text',
    }

    def __init__(self, runner, tool, job_order, enable_reuse):
        self.runner = runner
        self.tool = tool
        self.job = RunnerJob(
            runner=runner,
            tool=tool,
            job_order=job_order,
            enable_reuse=enable_reuse,
            output_name=None)

    def pipeline_component_spec(self):
        """Return a component that Workbench and a-r-p-i will understand.

        Specifically, translate CWL input specs to Arvados pipeline
        format, like {"dataclass":"File","value":"xyz"}.
        """

        spec = self.job.arvados_job_spec()

        # Most of the component spec is exactly the same as the job
        # spec (script, script_version, etc.).
        # spec['script_parameters'] isn't right, though. A component
        # spec's script_parameters hash is a translation of
        # self.tool.tool['inputs'] with defaults/overrides taken from
        # the job order. So we move the job parameters out of the way
        # and build a new spec['script_parameters'].
        job_params = spec['script_parameters']
        spec['script_parameters'] = {}

        for param in self.tool.tool['inputs']:
            param = copy.deepcopy(param)

            # Data type and "required" flag...
            types = param['type']
            if not isinstance(types, list):
                types = [types]
            param['required'] = 'null' not in types
            non_null_types = set(types) - set(['null'])
            if len(non_null_types) == 1:
                the_type = [c for c in non_null_types][0]
                dataclass = self.type_to_dataclass.get(the_type)
                if dataclass:
                    param['dataclass'] = dataclass
            # Note: if we didn't figure out a single appropriate
            # dataclass, we just leave that attribute out. We leave
            # the "type" attribute there in any case, which might help
            # downstream.

            # Title and description...
            title = param.pop('label', '')
            descr = param.pop('doc', '').rstrip('\n')
            if title:
                param['title'] = title
            if descr:
                param['description'] = descr

            # Fill in the value from the current job order, if any.
            param_id = shortname(param.pop('id'))
            value = job_params.get(param_id)
            if value is None:
                pass
            elif not isinstance(value, dict):
                param['value'] = value
            elif param.get('dataclass') in ('File', 'Collection') and value.get('location'):
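                # The location is a "keep:..." URI; strip the scheme prefix
                # to get the collection path Workbench expects.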
                param['value'] = value['location'][5:]

            spec['script_parameters'][param_id] = param
        spec['script_parameters']['cwl:tool'] = job_params['cwl:tool']
        return spec

    def save(self):
        job_spec = self.pipeline_component_spec()
        response = self.runner.api.pipeline_templates().create(body={
            "components": {
                self.job.name: job_spec,
            },
            "name": self.job.name,
            "owner_uuid": self.runner.project_uuid,
        }, ensure_unique_name=True).execute(num_retries=self.runner.num_retries)
        self.uuid = response["uuid"]
        logger.info("Created template %s", self.uuid)