Merge branch '10172-crunch2-container-output' closes #10172
[arvados.git] sdk/cwl/arvados_cwl/__init__.py
#!/usr/bin/env python

# Implement cwl-runner interface for submitting and running work on Arvados, using
# either the Crunch jobs API or Crunch containers API.

import argparse
import logging
import os
import sys
import threading
import hashlib
import copy
import json
from functools import partial
import pkg_resources  # part of setuptools

from cwltool.errors import WorkflowException
import cwltool.main
import cwltool.workflow
import schema_salad

import arvados
import arvados.config

from .arvcontainer import ArvadosContainer, RunnerContainer
from .arvjob import ArvadosJob, RunnerJob, RunnerTemplate
from .runner import Runner, upload_instance
from .arvtool import ArvadosCommandTool
from .arvworkflow import ArvadosWorkflow, upload_workflow
from .fsaccess import CollectionFsAccess
from .perf import Perf
from .pathmapper import FinalOutputPathMapper
from ._version import __version__

from cwltool.pack import pack
from cwltool.process import shortname, UnsupportedRequirement, getListing
from cwltool.pathmapper import adjustFileObjs, adjustDirObjs
from cwltool.draft2tool import compute_checksums
from arvados.api import OrderedJsonModel

logger = logging.getLogger('arvados.cwl-runner')
metrics = logging.getLogger('arvados.cwl-runner.metrics')
logger.setLevel(logging.INFO)


class ArvCwlRunner(object):
    """Execute a CWL tool or workflow, submit work (using either the jobs or
    containers API), wait for it to complete, and report output.

    """

    def __init__(self, api_client, work_api=None, keep_client=None, output_name=None):
        self.api = api_client
        self.processes = {}
        self.lock = threading.Lock()
        self.cond = threading.Condition(self.lock)
        self.final_output = None
        self.final_status = None
        self.uploaded = {}
        self.num_retries = 4
        self.uuid = None
        self.stop_polling = threading.Event()
        self.poll_api = None
        self.pipeline = None
        self.final_output_collection = None
        self.output_name = output_name
        self.project_uuid = None

        if keep_client is not None:
            self.keep_client = keep_client
        else:
            self.keep_client = arvados.keep.KeepClient(api_client=self.api, num_retries=self.num_retries)

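        # Select the work API: use the one requested by the caller if the
        # server's discovery document advertises it, otherwise fall back to
        # the first of "jobs" or "containers" that the server supports.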
        self.work_api = None
        for api in ["jobs", "containers"]:
            try:
                methods = self.api._rootDesc.get('resources')[api]['methods']
                if ('httpMethod' in methods['create'] and
                    (work_api == api or work_api is None)):
                    self.work_api = api
                    break
            except KeyError:
                pass
        if not self.work_api:
            if work_api is None:
                raise Exception("No supported APIs")
            else:
                raise Exception("Unsupported API '%s'" % work_api)

    def arv_make_tool(self, toolpath_object, **kwargs):
        kwargs["work_api"] = self.work_api
        if toolpath_object.get("class") == "CommandLineTool":
            return ArvadosCommandTool(self, toolpath_object, **kwargs)
        elif toolpath_object.get("class") == "Workflow":
            return ArvadosWorkflow(self, toolpath_object, **kwargs)
        else:
            return cwltool.workflow.defaultMakeTool(toolpath_object, **kwargs)

    def output_callback(self, out, processStatus):
        if processStatus == "success":
            logger.info("Overall process status is %s", processStatus)
            if self.pipeline:
                self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
                                                     body={"state": "Complete"}).execute(num_retries=self.num_retries)
        else:
            logger.warn("Overall process status is %s", processStatus)
            if self.pipeline:
                self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
                                                     body={"state": "Failed"}).execute(num_retries=self.num_retries)
        self.final_status = processStatus
        self.final_output = out

    def on_message(self, event):
        if "object_uuid" in event:
            if event["object_uuid"] in self.processes and event["event_type"] == "update":
                if event["properties"]["new_attributes"]["state"] == "Running" and self.processes[event["object_uuid"]].running is False:
                    uuid = event["object_uuid"]
                    with self.lock:
                        j = self.processes[uuid]
                        logger.info("Job %s (%s) is Running", j.name, uuid)
                        j.running = True
                        j.update_pipeline_component(event["properties"]["new_attributes"])
                elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled"):
                    uuid = event["object_uuid"]
                    try:
                        self.cond.acquire()
                        j = self.processes[uuid]
                        # Singularize the API name for logging, e.g.
                        # "containers" -> "Container", "jobs" -> "Job".
                        txt = self.work_api[0].upper() + self.work_api[1:-1]
                        logger.info("%s %s (%s) is %s", txt, j.name, uuid, event["properties"]["new_attributes"]["state"])
                        with Perf(metrics, "done %s" % j.name):
                            j.done(event["properties"]["new_attributes"])
                        self.cond.notify()
                    finally:
                        self.cond.release()

    def poll_states(self):
        """Poll status of jobs or containers listed in the processes dict.

        Runs in a separate thread.
        """

        while True:
            self.stop_polling.wait(15)
            if self.stop_polling.is_set():
                break
            with self.lock:
                keys = self.processes.keys()
            if not keys:
                continue

            if self.work_api == "containers":
                table = self.poll_api.containers()
            elif self.work_api == "jobs":
                table = self.poll_api.jobs()

            try:
                proc_states = table.list(filters=[["uuid", "in", keys]]).execute(num_retries=self.num_retries)
            except Exception as e:
                logger.warn("Error checking states on API server: %s", e)
                continue

            # Feed each polled record through on_message() as a synthetic
            # "update" event, so state changes are handled in one place.
            for p in proc_states["items"]:
                self.on_message({
                    "object_uuid": p["uuid"],
                    "event_type": "update",
                    "properties": {
                        "new_attributes": p
                    }
                })

    def get_uploaded(self):
        return self.uploaded.copy()

    def add_uploaded(self, src, pair):
        self.uploaded[src] = pair

    def check_writable(self, obj):
        if isinstance(obj, dict):
            if obj.get("writable"):
                raise UnsupportedRequirement("InitialWorkDir feature 'writable: true' not supported")
            for v in obj.itervalues():
                self.check_writable(v)
        if isinstance(obj, list):
            for v in obj:
                self.check_writable(v)

    def make_output_collection(self, name, outputObj):
        outputObj = copy.deepcopy(outputObj)

        files = []
        def capture(fileobj):
            files.append(fileobj)

        adjustDirObjs(outputObj, capture)
        adjustFileObjs(outputObj, capture)

        generatemapper = FinalOutputPathMapper(files, "", "", separateDirs=False)

        final = arvados.collection.Collection(api_client=self.api,
                                              keep_client=self.keep_client,
                                              num_retries=self.num_retries)

        srccollections = {}
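        # Each mapper key is a "keep:<collection>/<path>" location; strip the
        # five-character "keep:" prefix to find the source collection, open it
        # once, and copy each mapped path into the new output collection.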
        for k, v in generatemapper.items():
            sp = k.split("/")
            srccollection = sp[0][5:]
            if srccollection not in srccollections:
                srccollections[srccollection] = arvados.collection.CollectionReader(
                    srccollection,
                    api_client=self.api,
                    keep_client=self.keep_client,
                    num_retries=self.num_retries)
            reader = srccollections[srccollection]
            try:
                srcpath = "/".join(sp[1:]) if len(sp) > 1 else "."
                final.copy(srcpath, v.target, source_collection=reader, overwrite=False)
            except IOError as e:
                logger.warn("While preparing output collection: %s", e)

        def rewrite(fileobj):
            fileobj["location"] = generatemapper.mapper(fileobj["location"]).target
            for k in ("basename", "size", "listing"):
                if k in fileobj:
                    del fileobj[k]

        adjustDirObjs(outputObj, rewrite)
        adjustFileObjs(outputObj, rewrite)

        with final.open("cwl.output.json", "w") as f:
            json.dump(outputObj, f, sort_keys=True, indent=4, separators=(',', ': '))

        final.save_new(name=name, owner_uuid=self.project_uuid, ensure_unique_name=True)

        logger.info("Final output collection %s \"%s\" (%s)", final.portable_data_hash(),
                    final.api_response()["name"],
                    final.manifest_locator())

        self.final_output_collection = final

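    # When this runner is itself executing inside a Crunch container (or a
    # Crunch job task), record the final output collection on that container
    # or task, so the enclosing record reports the workflow's output.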
    def set_crunch_output(self):
        if self.work_api == "containers":
            try:
                current = self.api.containers().current().execute(num_retries=self.num_retries)
                self.api.containers().update(uuid=current['uuid'],
                                             body={
                                                 'output': self.final_output_collection.portable_data_hash(),
                                             }).execute(num_retries=self.num_retries)
            except Exception as e:
                logger.info("Setting container output: %s", e)
        elif self.work_api == "jobs" and "TASK_UUID" in os.environ:
            self.api.job_tasks().update(uuid=os.environ["TASK_UUID"],
                                        body={
                                            'output': self.final_output_collection.portable_data_hash(),
                                            'success': self.final_status == "success",
                                            'progress': 1.0
                                        }).execute(num_retries=self.num_retries)

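    # Main entry point: passed to cwltool.main.main() as the executor in
    # main() below.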
    def arv_executor(self, tool, job_order, **kwargs):
        self.debug = kwargs.get("debug")

        tool.visit(self.check_writable)

        useruuid = self.api.users().current().execute()["uuid"]
        self.project_uuid = kwargs.get("project_uuid") or useruuid
        self.pipeline = None
        make_fs_access = kwargs.get("make_fs_access") or partial(CollectionFsAccess,
                                                                 api_client=self.api,
                                                                 keep_client=self.keep_client)
        self.fs_access = make_fs_access(kwargs["basedir"])

        if kwargs.get("create_template"):
            tmpl = RunnerTemplate(self, tool, job_order, kwargs.get("enable_reuse"))
            tmpl.save()
            # cwltool.main will write our return value to stdout.
            return tmpl.uuid

        if kwargs.get("create_workflow") or kwargs.get("update_workflow"):
            return upload_workflow(self, tool, job_order, self.project_uuid, kwargs.get("update_workflow"))

        self.ignore_docker_for_reuse = kwargs.get("ignore_docker_for_reuse")

        kwargs["make_fs_access"] = make_fs_access
        kwargs["enable_reuse"] = kwargs.get("enable_reuse")
        kwargs["use_container"] = True
        kwargs["tmpdir_prefix"] = "tmp"
        kwargs["on_error"] = "continue"
        kwargs["compute_checksum"] = kwargs.get("compute_checksum")

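        # Containers (crunch2) see fixed paths inside the container; jobs
        # (crunch1) use $(task.outdir) / $(task.tmpdir) placeholders that are
        # substituted at run time.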
        if self.work_api == "containers":
            kwargs["outdir"] = "/var/spool/cwl"
            kwargs["docker_outdir"] = "/var/spool/cwl"
            kwargs["tmpdir"] = "/tmp"
            kwargs["docker_tmpdir"] = "/tmp"
        elif self.work_api == "jobs":
            kwargs["outdir"] = "$(task.outdir)"
            kwargs["docker_outdir"] = "$(task.outdir)"
            kwargs["tmpdir"] = "$(task.tmpdir)"

        upload_instance(self, shortname(tool.tool["id"]), tool, job_order)

        runnerjob = None
        if kwargs.get("submit"):
            if self.work_api == "containers":
                if tool.tool["class"] == "CommandLineTool":
                    runnerjob = tool.job(job_order,
                                         self.output_callback,
                                         **kwargs).next()
                else:
                    runnerjob = RunnerContainer(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name)
            else:
                runnerjob = RunnerJob(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name)

        if not kwargs.get("submit") and "cwl_runner_job" not in kwargs and self.work_api != "containers":
            # Create pipeline for local run
            self.pipeline = self.api.pipeline_instances().create(
                body={
                    "owner_uuid": self.project_uuid,
                    "name": shortname(tool.tool["id"]),
                    "components": {},
                    "state": "RunningOnClient"}).execute(num_retries=self.num_retries)
            logger.info("Pipeline instance %s", self.pipeline["uuid"])

        if runnerjob and not kwargs.get("wait"):
            runnerjob.run(wait=kwargs.get("wait"))
            return runnerjob.uuid

        self.poll_api = arvados.api('v1')
        self.polling_thread = threading.Thread(target=self.poll_states)
        self.polling_thread.start()

        if runnerjob:
            jobiter = iter((runnerjob,))
        else:
            if "cwl_runner_job" in kwargs:
                self.uuid = kwargs.get("cwl_runner_job").get('uuid')
            jobiter = tool.job(job_order,
                               self.output_callback,
                               **kwargs)

        try:
            self.cond.acquire()
            # Will continue to hold the lock for the duration of this code
            # except when in cond.wait(), at which point on_message can update
            # job state and process output callbacks.

            loopperf = Perf(metrics, "jobiter")
            loopperf.__enter__()
            for runnable in jobiter:
                loopperf.__exit__()
                if runnable:
                    with Perf(metrics, "run"):
                        runnable.run(**kwargs)
                else:
                    if self.processes:
                        self.cond.wait(1)
                    else:
                        logger.error("Workflow is deadlocked, no runnable jobs and not waiting on any pending jobs.")
                        break
                loopperf.__enter__()
            loopperf.__exit__()

            while self.processes:
                self.cond.wait(1)

        except UnsupportedRequirement:
            raise
        except:
            if sys.exc_info()[0] is KeyboardInterrupt:
                logger.error("Interrupted, marking pipeline as failed")
            else:
                logger.error("Caught unhandled exception, marking pipeline as failed.  Error was: %s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
            if self.pipeline:
                self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
                                                     body={"state": "Failed"}).execute(num_retries=self.num_retries)
            if runnerjob and runnerjob.uuid and self.work_api == "containers":
                self.api.container_requests().update(uuid=runnerjob.uuid,
                                                     body={"priority": "0"}).execute(num_retries=self.num_retries)
        finally:
            self.cond.release()
            self.stop_polling.set()
            self.polling_thread.join()

        if self.final_status == "UnsupportedRequirement":
            raise UnsupportedRequirement("Check log for details.")

        if self.final_output is None:
            raise WorkflowException("Workflow did not return a result.")

        if kwargs.get("submit") and isinstance(runnerjob, Runner):
            logger.info("Final output collection %s", runnerjob.final_output)
        else:
            if self.output_name is None:
                self.output_name = "Output of %s" % (shortname(tool.tool["id"]))
            self.make_output_collection(self.output_name, self.final_output)
            self.set_crunch_output()

        if self.final_status != "success":
            raise WorkflowException("Workflow failed.")

        if kwargs.get("compute_checksum"):
            adjustDirObjs(self.final_output, partial(getListing, self.fs_access))
            adjustFileObjs(self.final_output, partial(compute_checksums, self.fs_access))

        return self.final_output


def versionstring():
    """Return version strings of key packages for provenance and debugging."""

    arvcwlpkg = pkg_resources.require("arvados-cwl-runner")
    arvpkg = pkg_resources.require("arvados-python-client")
    cwlpkg = pkg_resources.require("cwltool")

    return "%s %s %s, %s %s, %s %s" % (sys.argv[0], __version__, arvcwlpkg[0].version,
                                       "arvados-python-client", arvpkg[0].version,
                                       "cwltool", cwlpkg[0].version)


def arg_parser():  # type: () -> argparse.ArgumentParser
    parser = argparse.ArgumentParser(description='Arvados executor for Common Workflow Language')

421     parser.add_argument("--basedir", type=str,
422                         help="Base directory used to resolve relative references in the input, default to directory of input object file or current directory (if inputs piped/provided on command line).")
423     parser.add_argument("--outdir", type=str, default=os.path.abspath('.'),
424                         help="Output directory, default current directory")
425
426     parser.add_argument("--eval-timeout",
427                         help="Time to wait for a Javascript expression to evaluate before giving an error, default 20s.",
428                         type=float,
429                         default=20)
430     parser.add_argument("--version", action="store_true", help="Print version and exit")
431
432     exgroup = parser.add_mutually_exclusive_group()
433     exgroup.add_argument("--verbose", action="store_true", help="Default logging")
434     exgroup.add_argument("--quiet", action="store_true", help="Only print warnings and errors.")
435     exgroup.add_argument("--debug", action="store_true", help="Print even more logging")
436
437     parser.add_argument("--metrics", action="store_true", help="Print timing metrics")
438
439     parser.add_argument("--tool-help", action="store_true", help="Print command line help for tool")
440
441     exgroup = parser.add_mutually_exclusive_group()
442     exgroup.add_argument("--enable-reuse", action="store_true",
443                         default=True, dest="enable_reuse",
444                         help="")
445     exgroup.add_argument("--disable-reuse", action="store_false",
446                         default=True, dest="enable_reuse",
447                         help="")
448
449     parser.add_argument("--project-uuid", type=str, metavar="UUID", help="Project that will own the workflow jobs, if not provided, will go to home project.")
450     parser.add_argument("--output-name", type=str, help="Name to use for collection that stores the final output.", default=None)
451     parser.add_argument("--ignore-docker-for-reuse", action="store_true",
452                         help="Ignore Docker image version when deciding whether to reuse past jobs.",
453                         default=False)
454
455     exgroup = parser.add_mutually_exclusive_group()
456     exgroup.add_argument("--submit", action="store_true", help="Submit workflow to run on Arvados.",
457                         default=True, dest="submit")
458     exgroup.add_argument("--local", action="store_false", help="Run workflow on local host (submits jobs to Arvados).",
459                         default=True, dest="submit")
460     exgroup.add_argument("--create-template", action="store_true", help="Create an Arvados pipeline template.")
461     exgroup.add_argument("--create-workflow", action="store_true", help="Create an Arvados workflow.")
462     exgroup.add_argument("--update-workflow", type=str, metavar="UUID", help="Update existing Arvados workflow with uuid.")
463
464     exgroup = parser.add_mutually_exclusive_group()
465     exgroup.add_argument("--wait", action="store_true", help="After submitting workflow runner job, wait for completion.",
466                         default=True, dest="wait")
467     exgroup.add_argument("--no-wait", action="store_false", help="Submit workflow runner job and exit.",
468                         default=True, dest="wait")
469
470     parser.add_argument("--api", type=str,
471                         default=None, dest="work_api",
472                         help="Select work submission API, one of 'jobs' or 'containers'.")
473
474     parser.add_argument("--compute-checksum", action="store_true", default=False,
475                         help="Compute checksum of contents while collecting outputs",
476                         dest="compute_checksum")
477
478     parser.add_argument("workflow", type=str, nargs="?", default=None, help="The workflow to execute")
479     parser.add_argument("job_order", nargs=argparse.REMAINDER, help="The input object to the workflow.")
480
481     return parser
482
def add_arv_hints():
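    """Load the Arvados CWL extension schema and register its names with the
    cwltool document schema so Arvados-specific hints validate."""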
    cache = {}
    res = pkg_resources.resource_stream(__name__, 'arv-cwl-schema.yml')
    cache["http://arvados.org/cwl"] = res.read()
    res.close()
    document_loader, cwlnames, _, _ = cwltool.process.get_schema("v1.0")
    _, extnames, _, _ = schema_salad.schema.load_schema("http://arvados.org/cwl", cache=cache)
    for n in extnames.names:
        if not cwlnames.has_name("http://arvados.org/cwl#"+n, ""):
            cwlnames.add_name("http://arvados.org/cwl#"+n, "", extnames.get_name(n, ""))
        document_loader.idx["http://arvados.org/cwl#"+n] = {}


def main(args, stdout, stderr, api_client=None, keep_client=None):
    parser = arg_parser()

    job_order_object = None
    arvargs = parser.parse_args(args)
    if (arvargs.create_template or arvargs.create_workflow or arvargs.update_workflow) and not arvargs.job_order:
        job_order_object = ({}, "")

    add_arv_hints()

    try:
        if api_client is None:
            api_client = arvados.api('v1', model=OrderedJsonModel())
        runner = ArvCwlRunner(api_client, work_api=arvargs.work_api, keep_client=keep_client, output_name=arvargs.output_name)
    except Exception as e:
        logger.error(e)
        return 1

    if arvargs.debug:
        logger.setLevel(logging.DEBUG)

    if arvargs.quiet:
        logger.setLevel(logging.WARN)
        logging.getLogger('arvados.arv-run').setLevel(logging.WARN)

    if arvargs.metrics:
        metrics.setLevel(logging.DEBUG)
        logging.getLogger("cwltool.metrics").setLevel(logging.DEBUG)

    arvargs.conformance_test = None
    arvargs.use_container = True

    return cwltool.main.main(args=arvargs,
                             stdout=stdout,
                             stderr=stderr,
                             executor=runner.arv_executor,
                             makeTool=runner.arv_make_tool,
                             versionfunc=versionstring,
                             job_order_object=job_order_object,
                             make_fs_access=partial(CollectionFsAccess, api_client=api_client))