import copy
import logging
import re

from cwltool.process import get_feature, shortname
from cwltool.errors import WorkflowException
from cwltool.draft2tool import revmap_file, remove_hostfs, CommandLineTool
from cwltool.load_tool import fetch_document
from cwltool.builder import Builder

import arvados.collection

from .arvdocker import arv_docker_get_image
from .runner import Runner
from . import done

logger = logging.getLogger('arvados.cwl-runner')
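
# These patterns match lines that crunchrunner writes to the job's stderr
# log, from which we recover the task's working paths after the fact.
# An illustrative (not verbatim) example of such a line:
#   2016-03-14_17:23:45 zzzzz-8i9sb-0123456789abcde 1234 0 stderr run crunchrunner: $(task.tmpdir)=/tmp/crunch-job/task/tmpdir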
tmpdirre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.tmpdir\)=(.*)")
outdirre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.outdir\)=(.*)")
keepre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.keep\)=(.*)")

class ArvadosJob(object):
    """Submit and manage a Crunch job for executing a CWL CommandLineTool."""

    def __init__(self, runner):
        self.arvrunner = runner

    def run(self, dry_run=False, pull_image=True, **kwargs):
        script_parameters = {
            "command": self.command_line
        }
        runtime_constraints = {}

        if self.generatefiles:
            vwd = arvados.collection.Collection()
            script_parameters["task.vwd"] = {}
            for t in self.generatefiles:
                if isinstance(self.generatefiles[t], dict):
                    src, rest = self.arvrunner.fs_access.get_collection(self.generatefiles[t]["path"].replace("$(task.keep)/", "keep:"))
                    vwd.copy(rest, t, source_collection=src)
                else:
                    with vwd.open(t, "w") as f:
                        f.write(self.generatefiles[t].encode('utf-8'))
            # Save the virtual working directory to Keep so its contents
            # can be referenced by portable data hash below.
            vwd.save_new()
            for t in self.generatefiles:
                script_parameters["task.vwd"][t] = "$(task.keep)/%s/%s" % (vwd.portable_data_hash(), t)
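            # script_parameters["task.vwd"] now maps each generated file to
            # its location in the saved collection, e.g. (hypothetical hash):
            #   {"input.txt": "$(task.keep)/99999999999999999999999999999999+99/input.txt"}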

        script_parameters["task.env"] = {"TMPDIR": "$(task.tmpdir)"}
        if self.environment:
            script_parameters["task.env"].update(self.environment)

        if self.stdin:
            script_parameters["task.stdin"] = self.pathmapper.mapper(self.stdin)[1]

        if self.stdout:
            script_parameters["task.stdout"] = self.stdout

        (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
        if docker_req and kwargs.get("use_container") is not False:
            runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api, docker_req, pull_image, self.arvrunner.project_uuid)
        else:
            runtime_constraints["docker_image"] = "arvados/jobs"

        resources = self.builder.resources
        if resources is not None:
            runtime_constraints["min_cores_per_node"] = resources.get("cores", 1)
            runtime_constraints["min_ram_mb_per_node"] = resources.get("ram")
            runtime_constraints["min_scratch_mb_per_node"] = resources.get("tmpdirSize", 0) + resources.get("outdirSize", 0)
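        # A typical result, with values depending on the tool's
        # ResourceRequirement (the numbers here are hypothetical):
        #   {"docker_image": "arvados/jobs",
        #    "min_cores_per_node": 1,
        #    "min_ram_mb_per_node": 1024,
        #    "min_scratch_mb_per_node": 2048}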

        filters = [["repository", "=", "arvados"],
                   ["script", "=", "crunchrunner"],
                   ["script_version", "in git", "9e5b98e8f5f4727856b53447191f9c06e3da2ba6"]]
        if not self.arvrunner.ignore_docker_for_reuse:
            filters.append(["docker_image_locator", "in docker", runtime_constraints["docker_image"]])
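        # These filters scope job reuse, mirroring the create() call below:
        # as we understand the Arvados filter operators, "in git" accepts any
        # crunchrunner version whose history includes the pinned commit, and
        # "in docker" accepts any locator referring to an equivalent Docker
        # image.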

        try:
            response = self.arvrunner.api.jobs().create(
                body={
                    "owner_uuid": self.arvrunner.project_uuid,
                    "script": "crunchrunner",
                    "repository": "arvados",
                    "script_version": "master",
                    "minimum_script_version": "9e5b98e8f5f4727856b53447191f9c06e3da2ba6",
                    "script_parameters": {"tasks": [script_parameters]},
                    "runtime_constraints": runtime_constraints
                },
                filters=filters,
                find_or_create=kwargs.get("enable_reuse", True)
            ).execute(num_retries=self.arvrunner.num_retries)

            self.arvrunner.processes[response["uuid"]] = self

            self.update_pipeline_component(response)

            logger.info("Job %s (%s) is %s", self.name, response["uuid"], response["state"])

            if response["state"] in ("Complete", "Failed", "Cancelled"):
                self.done(response)
        except Exception as e:
            logger.error("Got error %s", e)
            self.output_callback({}, "permanentFail")

    def update_pipeline_component(self, record):
        if self.arvrunner.pipeline:
            self.arvrunner.pipeline["components"][self.name] = {"job": record}
            self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(
                uuid=self.arvrunner.pipeline["uuid"],
                body={
                    "components": self.arvrunner.pipeline["components"]
                }).execute(num_retries=self.arvrunner.num_retries)
        if self.arvrunner.uuid:
            try:
                job = self.arvrunner.api.jobs().get(uuid=self.arvrunner.uuid).execute()
                if job:
                    components = job["components"]
                    components[self.name] = record["uuid"]
                    self.arvrunner.api.jobs().update(
                        uuid=self.arvrunner.uuid,
                        body={
                            "components": components
                        }).execute(num_retries=self.arvrunner.num_retries)
            except Exception as e:
                logger.info("Error adding to components: %s", e)

    def done(self, record):
        try:
            self.update_pipeline_component(record)
        except Exception:
            # Component bookkeeping is best-effort; status handling below
            # must proceed regardless.
            pass

        try:
            if record["state"] == "Complete":
                processStatus = "success"
            else:
                processStatus = "permanentFail"

            outputs = {}
            try:
                if record["output"]:
                    logc = arvados.collection.Collection(record["log"])
                    log = logc.open(logc.keys()[0])
                    tmpdir = None
                    outdir = None
                    keepdir = None
                    for l in log:
                        # Determine the tmpdir, outdir and keepdir paths from
                        # the job run. Unfortunately, we can't take the first
                        # values we find (which are expected to be near the
                        # top) and stop scanning, because if the node fails
                        # and the job restarts on a different node these
                        # values will differ between runs, and we need to
                        # know about the final run that actually produced
                        # output.
                        g = tmpdirre.match(l)
                        if g:
                            tmpdir = g.group(1)
                        g = outdirre.match(l)
                        if g:
                            outdir = g.group(1)
                        g = keepre.match(l)
                        if g:
                            keepdir = g.group(1)

                    outputs = done.done(self, record, tmpdir, outdir, keepdir)
            except WorkflowException as e:
                logger.error("Error while collecting job outputs:\n%s", e, exc_info=(e if self.arvrunner.debug else False))
                processStatus = "permanentFail"
            except Exception as e:
                logger.exception("Got unknown exception while collecting job outputs:")
                processStatus = "permanentFail"

            self.output_callback(outputs, processStatus)
        finally:
            del self.arvrunner.processes[record["uuid"]]


class RunnerJob(Runner):
    """Submit and manage a Crunch job that runs crunch_scripts/cwl-runner."""

    def arvados_job_spec(self, dry_run=False, pull_image=True, **kwargs):
        """Create an Arvados job specification for this workflow.

        The returned dict can be used to create a job (i.e., passed as
        the +body+ argument to jobs().create()), or as a component in
        a pipeline template or pipeline instance.
        """
        workflowmapper = super(RunnerJob, self).arvados_job_spec(dry_run=dry_run, pull_image=pull_image, **kwargs)

        self.job_order["cwl:tool"] = workflowmapper.mapper(self.tool.tool["id"])[1]
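        # Per the docstring above, this same dict doubles as a pipeline
        # component body: RunnerTemplate.pipeline_component_spec() below
        # starts from it and rewrites script_parameters.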
193 "script": "cwl-runner",
194 "script_version": "master",
195 "repository": "arvados",
196 "script_parameters": self.job_order,
197 "runtime_constraints": {
198 "docker_image": "arvados/jobs"

    def run(self, *args, **kwargs):
        job_spec = self.arvados_job_spec(*args, **kwargs)
        job_spec.setdefault("owner_uuid", self.arvrunner.project_uuid)

        response = self.arvrunner.api.jobs().create(
            body=job_spec,
            find_or_create=self.enable_reuse
        ).execute(num_retries=self.arvrunner.num_retries)

        self.uuid = response["uuid"]
        self.arvrunner.processes[self.uuid] = self

        logger.info("Submitted job %s", response["uuid"])

        if kwargs.get("submit"):
            self.pipeline = self.arvrunner.api.pipeline_instances().create(
                body={
                    "owner_uuid": self.arvrunner.project_uuid,
                    "name": shortname(self.tool.tool["id"]),
                    "components": {"cwl-runner": {"job": {"uuid": self.uuid, "state": response["state"]}}},
                    "state": "RunningOnClient"
                }).execute(num_retries=self.arvrunner.num_retries)

        if response["state"] in ("Complete", "Failed", "Cancelled"):
            self.done(response)


class RunnerTemplate(object):
    """An Arvados pipeline template that invokes a CWL workflow."""

    type_to_dataclass = {
        'boolean': 'boolean',
        'File': 'File',
        'float': 'number',
        'int': 'number',
        'string': 'text',
    }

    def __init__(self, runner, tool, job_order, enable_reuse):
        self.runner = runner
        self.tool = tool
        self.job = RunnerJob(
            runner=runner,
            tool=tool,
            job_order=job_order,
            enable_reuse=enable_reuse)

    def pipeline_component_spec(self):
        """Return a component that Workbench and a-r-p-i will understand.

        Specifically, translate CWL input specs to Arvados pipeline
        format, like {"dataclass":"File","value":"xyz"}.
        """
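        # For example, a CWL input like (values hypothetical):
        #   {"id": "#main/reference", "type": ["null", "File"], "label": "Reference"}
        # becomes roughly:
        #   {"type": ["null", "File"], "required": False, "dataclass": "File",
        #    "title": "Reference", "value": "keep:ref.fa"}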
        spec = self.job.arvados_job_spec()

        # Most of the component spec is exactly the same as the job
        # spec (script, script_version, etc.).
        # spec['script_parameters'] isn't right, though. A component
        # spec's script_parameters hash is a translation of
        # self.tool.tool['inputs'] with defaults/overrides taken from
        # the job order. So we move the job parameters out of the way
        # and build a new spec['script_parameters'].
        job_params = spec['script_parameters']
        spec['script_parameters'] = {}

        for param in self.tool.tool['inputs']:
            param = copy.deepcopy(param)

            # Data type and "required" flag...
            types = param['type']
            if not isinstance(types, list):
                types = [types]
            param['required'] = 'null' not in types
            non_null_types = set(types) - set(['null'])
            if len(non_null_types) == 1:
                the_type = [c for c in non_null_types][0]
                dataclass = self.type_to_dataclass.get(the_type)
                if dataclass:
                    param['dataclass'] = dataclass
            # Note: If we didn't figure out a single appropriate
            # dataclass, we just leave that attribute out. We leave
            # the "type" attribute there in any case, which might help
            # downstream.

            # Title and description...
            title = param.pop('label', '')
            descr = param.pop('description', '').rstrip('\n')
            if title:
                param['title'] = title
            if descr:
                param['description'] = descr

            # Fill in the value from the current job order, if any.
            param_id = shortname(param.pop('id'))
            value = job_params.get(param_id)
            if value is None:
                pass
            elif not isinstance(value, dict):
                param['value'] = value
            elif param.get('dataclass') == 'File' and value.get('path'):
                param['value'] = value['path']

            spec['script_parameters'][param_id] = param
        spec['script_parameters']['cwl:tool'] = job_params['cwl:tool']
        return spec

    def save(self):
        job_spec = self.pipeline_component_spec()
        response = self.runner.api.pipeline_templates().create(body={
            "components": {
                self.job.name: job_spec,
            },
            "name": self.job.name,
            "owner_uuid": self.runner.project_uuid,
        }, ensure_unique_name=True).execute(num_retries=self.runner.num_retries)
        self.uuid = response["uuid"]
        logger.info("Created template %s", self.uuid)