import fnmatch
import logging
import os
import re
import sys
import threading

import httplib2

import arvados
import arvados.collection
import arvados.commands.keepdocker
import arvados.commands.run
import arvados.errors
import arvados.events
import arvados.util

import cwltool.docker
import cwltool.draft2tool
import cwltool.main
import cwltool.pathmapper
import cwltool.workflow
from cwltool import process
from cwltool.process import shortname, get_feature, adjustFiles
from cwltool.errors import WorkflowException

from arvados.api import OrderedJsonModel
logger = logging.getLogger('arvados.cwl-runner')
logger.setLevel(logging.INFO)
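# Portable data hash of the Keep collection that holds the crunchrunner
# binary, plus fallback download URLs used to recreate that collection
# when it is not already present on the target cluster.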
crunchrunner_pdh = "83db29f08544e1c319572a6bd971088a+140"
crunchrunner_download = "https://cloud.curoverse.com/collections/download/qr1hi-4zz18-n3m1yxd0vx78jic/1i1u2qtq66k1atziv4ocfgsg5nu5tj11n4r6e0bhvjg03rix4m/crunchrunner"
certs_download = "https://cloud.curoverse.com/collections/download/qr1hi-4zz18-n3m1yxd0vx78jic/1i1u2qtq66k1atziv4ocfgsg5nu5tj11n4r6e0bhvjg03rix4m/ca-certificates.crt"
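# crunchrunner echoes the resolved values of $(task.tmpdir), $(task.outdir)
# and $(task.keep) to stderr; these patterns recover them from the job log
# so output paths can be mapped back to Keep locators.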
tmpdirre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.tmpdir\)=(.*)")
outdirre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.outdir\)=(.*)")
keepre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.keep\)=(.*)")


def arv_docker_get_image(api_client, dockerRequirement, pull_image, project_uuid):
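    """Ensure the Docker image named by a CWL DockerRequirement is available
    in Arvados, uploading it with arv-keepdocker if necessary, and return
    the image id."""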
    if "dockerImageId" not in dockerRequirement and "dockerPull" in dockerRequirement:
        dockerRequirement["dockerImageId"] = dockerRequirement["dockerPull"]

    sp = dockerRequirement["dockerImageId"].split(":")
    image_name = sp[0]
    image_tag = sp[1] if len(sp) > 1 else None

    images = arvados.commands.keepdocker.list_images_in_arv(api_client, 3,
                                                            image_name=image_name,
                                                            image_tag=image_tag)

    if not images:
        # Image is not in Keep yet: pull it locally and upload it.
        imageId = cwltool.docker.get_image(dockerRequirement, pull_image)
        args = ["--project-uuid="+project_uuid, image_name]
        if image_tag:
            args.append(image_tag)
        logger.info("Uploading Docker image %s", ":".join(args[1:]))
        arvados.commands.keepdocker.main(args)

    return dockerRequirement["dockerImageId"]


class CollectionFsAccess(cwltool.process.StdFsAccess):
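    """Implement the cwltool FsAccess interface for files stored in Arvados
    Keep collections (paths of the form "keep:<portable data hash>/...")."""
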
    def __init__(self, basedir):
        self.collections = {}
        self.basedir = basedir

    def get_collection(self, path):
        p = path.split("/")
        if p[0].startswith("keep:") and arvados.util.keep_locator_pattern.match(p[0][5:]):
            pdh = p[0][5:]
            if pdh not in self.collections:
                self.collections[pdh] = arvados.collection.CollectionReader(pdh)
            return (self.collections[pdh], "/".join(p[1:]))
        else:
            return (None, path)

    def _match(self, collection, patternsegments, parent):
        if not patternsegments:
            return []

        if not isinstance(collection, arvados.collection.RichCollectionBase):
            return []

        ret = []
        # iterate over the files and subcollections in 'collection'
        for filename in collection:
            if patternsegments[0] == '.':
                # Pattern contains something like "./foo" so just shift
                # past the "." and match the rest of the pattern.
                ret.extend(self._match(collection, patternsegments[1:], parent))
            elif fnmatch.fnmatch(filename, patternsegments[0]):
                cur = os.path.join(parent, filename)
                if len(patternsegments) == 1:
                    ret.append(cur)
                else:
                    ret.extend(self._match(collection[filename], patternsegments[1:], cur))
        return ret

    def glob(self, pattern):
        collection, rest = self.get_collection(pattern)
        patternsegments = rest.split("/")
        return self._match(collection, patternsegments, "keep:" + collection.manifest_locator())

    def open(self, fn, mode):
        collection, rest = self.get_collection(fn)
        if collection:
            return collection.open(rest, mode)
        else:
            return open(self._abs(fn), mode)

    def exists(self, fn):
        collection, rest = self.get_collection(fn)
        if collection:
            return collection.exists(rest)
        else:
            return os.path.exists(self._abs(fn))


class ArvadosJob(object):
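    """Submit a single CWL CommandLineTool invocation as an Arvados Crunch
    job (script "crunchrunner") and track it to completion."""
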
    def __init__(self, runner):
        self.arvrunner = runner
        self.running = False

    def run(self, dry_run=False, pull_image=True, **kwargs):
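        # Translate the tool's command line, generated files, environment,
        # stdin/stdout redirection and resource requirements into
        # crunchrunner script_parameters and job runtime_constraints.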
        script_parameters = {
            "command": self.command_line
        }
        runtime_constraints = {}

        if self.generatefiles:
            vwd = arvados.collection.Collection()
            script_parameters["task.vwd"] = {}
            for t in self.generatefiles:
                if isinstance(self.generatefiles[t], dict):
                    src, rest = self.arvrunner.fs_access.get_collection(self.generatefiles[t]["path"].replace("$(task.keep)/", "keep:"))
                    vwd.copy(rest, t, source_collection=src)
                else:
                    with vwd.open(t, "w") as f:
                        f.write(self.generatefiles[t])
            # Save the staged files to Keep so the portable data hash
            # referenced below is resolvable on the compute node.
            vwd.save_new()
            for t in self.generatefiles:
                script_parameters["task.vwd"][t] = "$(task.keep)/%s/%s" % (vwd.portable_data_hash(), t)

        script_parameters["task.env"] = {"TMPDIR": "$(task.tmpdir)"}
        if self.environment:
            script_parameters["task.env"].update(self.environment)

        if self.stdin:
            script_parameters["task.stdin"] = self.pathmapper.mapper(self.stdin)[1]

        if self.stdout:
            script_parameters["task.stdout"] = self.stdout

        (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
        if docker_req and kwargs.get("use_container") is not False:
            runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api, docker_req, pull_image, self.arvrunner.project_uuid)

        resources = self.builder.resources
        if resources is not None:
            runtime_constraints["min_cores_per_node"] = resources.get("cores", 1)
            runtime_constraints["min_ram_mb_per_node"] = resources.get("ram")
            runtime_constraints["min_scratch_mb_per_node"] = resources.get("tmpdirSize", 0) + resources.get("outdirSize", 0)

        try:
            response = self.arvrunner.api.jobs().create(body={
                "owner_uuid": self.arvrunner.project_uuid,
                "script": "crunchrunner",
                "repository": "arvados",
                "script_version": "master",
                "minimum_script_version": "9e5b98e8f5f4727856b53447191f9c06e3da2ba6",
                "script_parameters": {"tasks": [script_parameters], "crunchrunner": crunchrunner_pdh+"/crunchrunner"},
                "runtime_constraints": runtime_constraints
            }, find_or_create=kwargs.get("enable_reuse", True)).execute(num_retries=self.arvrunner.num_retries)

            self.arvrunner.jobs[response["uuid"]] = self

            self.arvrunner.pipeline["components"][self.name] = {"job": response}
            self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(uuid=self.arvrunner.pipeline["uuid"],
                                                                                     body={
                                                                                         "components": self.arvrunner.pipeline["components"]
                                                                                     }).execute(num_retries=self.arvrunner.num_retries)

            logger.info("Job %s (%s) is %s", self.name, response["uuid"], response["state"])

            if response["state"] in ("Complete", "Failed", "Cancelled"):
                self.done(response)
        except Exception as e:
            logger.error("Got error %s" % str(e))
            self.output_callback({}, "permanentFail")

    def update_pipeline_component(self, record):
        self.arvrunner.pipeline["components"][self.name] = {"job": record}
        self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(uuid=self.arvrunner.pipeline["uuid"],
                                                                                 body={
                                                                                     "components": self.arvrunner.pipeline["components"]
                                                                                 }).execute(num_retries=self.arvrunner.num_retries)

    def done(self, record):
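        # Record the finished job in the pipeline instance, scrape the job
        # log for the directories crunchrunner reported, collect outputs,
        # and report success/failure back to cwltool.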
        try:
            self.update_pipeline_component(record)
        except:
            pass

        try:
            if record["state"] == "Complete":
                processStatus = "success"
            else:
                processStatus = "permanentFail"

            outputs = {}
            try:
                if record["output"]:
                    logc = arvados.collection.Collection(record["log"])
                    log = logc.open(logc.keys()[0])
                    tmpdir = None
                    outdir = None
                    keepdir = None
                    for l in log.readlines():
                        g = tmpdirre.match(l)
                        if g:
                            tmpdir = g.group(1)
                        g = outdirre.match(l)
                        if g:
                            outdir = g.group(1)
                        g = keepre.match(l)
                        if g:
                            keepdir = g.group(1)

                        # It turns out if the job fails and restarts it can
                        # come up on a different compute node, so we have to
                        # read the log to the end to be sure instead of taking the
                        # first values we find.
                        #if tmpdir and outdir and keepdir:
                        #    break

                    self.builder.outdir = outdir
                    self.builder.pathmapper.keepdir = keepdir
                    outputs = self.collect_outputs("keep:" + record["output"])
            except WorkflowException as e:
                logger.error("Error while collecting job outputs:\n%s", e, exc_info=(e if self.arvrunner.debug else False))
                processStatus = "permanentFail"
            except Exception as e:
                logger.exception("Got unknown exception while collecting job outputs:")
                processStatus = "permanentFail"

            self.output_callback(outputs, processStatus)
        finally:
            del self.arvrunner.jobs[record["uuid"]]


class ArvPathMapper(cwltool.pathmapper.PathMapper):
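    """Map local input file paths to Keep references, uploading any files
    that are not already stored in Keep."""
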
    def __init__(self, arvrunner, referenced_files, basedir,
                 collection_pattern, file_pattern, **kwargs):
        self._pathmap = arvrunner.get_uploaded()
        uploadfiles = []

        pdh_path = re.compile(r'^keep:[0-9a-f]{32}\+\d+/.+')

        for src in referenced_files:
            if isinstance(src, basestring) and pdh_path.match(src):
                self._pathmap[src] = (src, collection_pattern % src[5:])
            if src not in self._pathmap:
                ab = cwltool.pathmapper.abspath(src, basedir)
                st = arvados.commands.run.statfile("", ab, fnPattern=file_pattern)
                if kwargs.get("conformance_test"):
                    self._pathmap[src] = (src, ab)
                elif isinstance(st, arvados.commands.run.UploadFile):
                    uploadfiles.append((src, ab, st))
                elif isinstance(st, arvados.commands.run.ArvFile):
                    self._pathmap[src] = (ab, st.fn)
                else:
                    raise cwltool.workflow.WorkflowException("Input file path '%s' is invalid" % st)

        if uploadfiles:
            arvados.commands.run.uploadfiles([u[2] for u in uploadfiles],
                                             arvrunner.api,
                                             dry_run=kwargs.get("dry_run"),
                                             num_retries=arvrunner.num_retries,
                                             fnPattern=file_pattern,
                                             project=arvrunner.project_uuid)

        for src, ab, st in uploadfiles:
            arvrunner.add_uploaded(src, (ab, st.fn))
            self._pathmap[src] = (ab, st.fn)

        self.keepdir = None

    def reversemap(self, target):
        if target.startswith("keep:"):
            return (target, target)
        elif self.keepdir and target.startswith(self.keepdir):
            return (target, "keep:" + target[len(self.keepdir)+1:])
        else:
            return super(ArvPathMapper, self).reversemap(target)


class ArvadosCommandTool(cwltool.draft2tool.CommandLineTool):
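    """CommandLineTool subclass that runs each job on Arvados instead of
    executing it locally."""
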
    def __init__(self, arvrunner, toolpath_object, **kwargs):
        super(ArvadosCommandTool, self).__init__(toolpath_object, **kwargs)
        self.arvrunner = arvrunner

    def makeJobRunner(self):
        return ArvadosJob(self.arvrunner)

    def makePathMapper(self, reffiles, input_basedir, **kwargs):
        return ArvPathMapper(self.arvrunner, reffiles, input_basedir,
                             "$(task.keep)/%s",
                             "$(task.keep)/%s/%s",
                             **kwargs)


class ArvCwlRunner(object):
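    """Execute a CWL workflow on Arvados: create a pipeline instance,
    submit one Crunch job per tool invocation, and track job state through
    the websocket event stream."""
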
    def __init__(self, api_client):
        self.api = api_client
        self.jobs = {}
        self.lock = threading.Lock()
        self.cond = threading.Condition(self.lock)
        self.final_output = None
        self.uploaded = {}
        self.num_retries = 4

    def arvMakeTool(self, toolpath_object, **kwargs):
        if "class" in toolpath_object and toolpath_object["class"] == "CommandLineTool":
            return ArvadosCommandTool(self, toolpath_object, **kwargs)
        else:
            return cwltool.workflow.defaultMakeTool(toolpath_object, **kwargs)

    def output_callback(self, out, processStatus):
        if processStatus == "success":
            logger.info("Overall job status is %s", processStatus)
            self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
                                                 body={"state": "Complete"}).execute(num_retries=self.num_retries)
        else:
            logger.warn("Overall job status is %s", processStatus)
            self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
                                                 body={"state": "Failed"}).execute(num_retries=self.num_retries)
        self.final_output = out

    def on_message(self, event):
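        # Websocket event handler: update bookkeeping when one of our jobs
        # starts Running, and hand finished jobs to ArvadosJob.done() while
        # waking the main loop waiting in cond.wait().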
        if "object_uuid" in event:
            if event["object_uuid"] in self.jobs and event["event_type"] == "update":
                if event["properties"]["new_attributes"]["state"] == "Running" and self.jobs[event["object_uuid"]].running is False:
                    uuid = event["object_uuid"]
                    with self.lock:
                        j = self.jobs[uuid]
                        logger.info("Job %s (%s) is Running", j.name, uuid)
                        j.running = True
                        j.update_pipeline_component(event["properties"]["new_attributes"])
                elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled"):
                    uuid = event["object_uuid"]
                    try:
                        self.cond.acquire()
                        j = self.jobs[uuid]
                        logger.info("Job %s (%s) is %s", j.name, uuid, event["properties"]["new_attributes"]["state"])
                        j.done(event["properties"]["new_attributes"])
                        self.cond.notify()
                    finally:
                        self.cond.release()

    def get_uploaded(self):
        return self.uploaded.copy()

    def add_uploaded(self, src, pair):
        self.uploaded[src] = pair

    def upload_docker(self, tool):
        # No-op here; Docker images are uploaded on demand by
        # arv_docker_get_image() when each job runs.
        pass

    def submit(self, tool, job_order, input_basedir, args, **kwargs):
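        # Instead of running the workflow from this client, upload
        # everything it references to Keep and create a job that runs
        # cwl-runner itself inside the arvados/jobs Docker image.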
        files = set()

        def visitFiles(path):
            files.add(path)

        adjustFiles(process.scandeps("", tool.tool,
                                     set(("run",)),
                                     set(("$schemas", "path"))),
                    visitFiles)
        adjustFiles(job_order, visitFiles)

        mapper = ArvPathMapper(self, files, "",
                               "$(task.keep)/%s",
                               "$(task.keep)/%s/%s",
                               **kwargs)

        # mapper.mapper() returns a (local, keep) tuple; substitute the
        # keep path for each file reference in the job order.
        adjustFiles(job_order, lambda p: mapper.mapper(p)[1])

        response = self.api.jobs().create(body={
            "script": "cwl-runner",
            "script_version": "8654-arv-jobs-cwl-runner",
            "repository": "arvados",
            "script_parameters": job_order,
            "runtime_constraints": {
                "docker_image": "arvados/jobs"
            }
        }, find_or_create=args.enable_reuse).execute(num_retries=self.num_retries)
        print response["uuid"]

    def arvExecutor(self, tool, job_order, input_basedir, args, **kwargs):
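        # Main entry point used by cwltool.main: either hand the whole
        # workflow off to a remote cwl-runner job (--submit), or run it
        # from this client, one Crunch job per step.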
        if args.submit:
            self.submit(tool, job_order, input_basedir, args, **kwargs)
            return

        events = arvados.events.subscribe(arvados.api('v1'), [["object_uuid", "is_a", "arvados#job"]], self.on_message)

        self.debug = args.debug

        try:
            self.api.collections().get(uuid=crunchrunner_pdh).execute()
        except arvados.errors.ApiError:
            # Collection is missing on this cluster: fetch the crunchrunner
            # binary and CA bundle and store them in Keep.
            h = httplib2.Http(ca_certs=arvados.util.ca_certs_path())
            resp, content = h.request(crunchrunner_download, "GET")
            resp2, content2 = h.request(certs_download, "GET")
            with arvados.collection.Collection() as col:
                with col.open("crunchrunner", "w") as f:
                    f.write(content)
                with col.open("ca-certificates.crt", "w") as f:
                    f.write(content2)

                col.save_new("crunchrunner binary", ensure_unique_name=True)

        self.fs_access = CollectionFsAccess(input_basedir)

        kwargs["fs_access"] = self.fs_access
        kwargs["enable_reuse"] = args.enable_reuse

        kwargs["outdir"] = "$(task.outdir)"
        kwargs["tmpdir"] = "$(task.tmpdir)"

        useruuid = self.api.users().current().execute()["uuid"]
        self.project_uuid = args.project_uuid if args.project_uuid else useruuid

        if kwargs.get("conformance_test"):
            return cwltool.main.single_job_executor(tool, job_order, input_basedir, args, **kwargs)
        else:
            self.pipeline = self.api.pipeline_instances().create(
                body={
                    "owner_uuid": self.project_uuid,
                    "name": shortname(tool.tool["id"]),
                    "components": {},
                    "state": "RunningOnClient"}).execute(num_retries=self.num_retries)

            logger.info("Pipeline instance %s", self.pipeline["uuid"])

            jobiter = tool.job(job_order,
                               input_basedir,
                               self.output_callback,
                               docker_outdir="$(task.outdir)",
                               **kwargs)

            try:
                self.cond.acquire()
                # Will continue to hold the lock for the duration of this code
                # except when in cond.wait(), at which point on_message can update
                # job state and process output callbacks.

                for runnable in jobiter:
                    if runnable:
                        runnable.run(**kwargs)
                    else:
                        if self.jobs:
                            self.cond.wait(1)
                        else:
                            logger.error("Workflow is deadlocked, no runnable jobs and not waiting on any pending jobs.")
                            break

                while self.jobs:
                    self.cond.wait(1)

                events.close()

                if self.final_output is None:
                    raise cwltool.workflow.WorkflowException("Workflow did not return a result.")

                # create final output collection
            except:
                if sys.exc_info()[0] is KeyboardInterrupt:
                    logger.error("Interrupted, marking pipeline as failed")
                else:
                    logger.error("Caught unhandled exception, marking pipeline as failed. Error was: %s", sys.exc_info()[0], exc_info=(sys.exc_info()[1] if self.debug else False))
                self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
                                                     body={"state": "Failed"}).execute(num_retries=self.num_retries)
            finally:
                self.cond.release()

            return self.final_output


def main(args, stdout, stderr, api_client=None):
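    # Wrap cwltool's argument parser and main(), swapping in the Arvados
    # executor and tool factory defined above.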
    args.insert(0, "--leave-outputs")
    parser = cwltool.main.arg_parser()
    exgroup = parser.add_mutually_exclusive_group()
    exgroup.add_argument("--enable-reuse", action="store_true",
                         default=True, dest="enable_reuse",
                         help="Enable job reuse (the default)")
    exgroup.add_argument("--disable-reuse", action="store_false",
                         default=True, dest="enable_reuse",
                         help="Disable job reuse")
    parser.add_argument("--project-uuid", type=str, help="Project that will own the workflow jobs")
    parser.add_argument("--submit", action="store_true", help="Submit job and print job uuid.")

    try:
        # Use the caller-supplied API client if given, otherwise create one.
        runner = ArvCwlRunner(api_client=api_client if api_client else arvados.api('v1', model=OrderedJsonModel()))
    except Exception as e:
        logger.error(e)
        return 1

    return cwltool.main.main(args, executor=runner.arvExecutor, makeTool=runner.arvMakeTool, parser=parser)