X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/c654baedb04251a741c840860041768ec661d3e7..091c92aef16f9657cf7b9eb8f8778aafa33f12c1:/sdk/cwl/arvados_cwl/__init__.py

diff --git a/sdk/cwl/arvados_cwl/__init__.py b/sdk/cwl/arvados_cwl/__init__.py
index 4c05f38187..2842e8a114 100644
--- a/sdk/cwl/arvados_cwl/__init__.py
+++ b/sdk/cwl/arvados_cwl/__init__.py
@@ -17,17 +17,21 @@ import pkg_resources  # part of setuptools
 from cwltool.errors import WorkflowException
 import cwltool.main
 import cwltool.workflow
+import cwltool.process
 import schema_salad
+from schema_salad.sourceline import SourceLine
 
 import arvados
 import arvados.config
+from arvados.keep import KeepClient
+from arvados.errors import ApiError
 
 from .arvcontainer import ArvadosContainer, RunnerContainer
 from .arvjob import ArvadosJob, RunnerJob, RunnerTemplate
-from .runner import Runner, upload_instance
+from .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps, upload_dependencies
 from .arvtool import ArvadosCommandTool
 from .arvworkflow import ArvadosWorkflow, upload_workflow
-from .fsaccess import CollectionFsAccess
+from .fsaccess import CollectionFsAccess, CollectionFetcher, collectionResolver
 from .perf import Perf
 from .pathmapper import FinalOutputPathMapper
 from ._version import __version__
@@ -42,6 +46,9 @@ logger = logging.getLogger('arvados.cwl-runner')
 metrics = logging.getLogger('arvados.cwl-runner.metrics')
 logger.setLevel(logging.INFO)
 
+arvados.log_handler.setFormatter(logging.Formatter(
+        '%(asctime)s %(name)s %(levelname)s: %(message)s',
+        '%Y-%m-%d %H:%M:%S'))
 
 class ArvCwlRunner(object):
     """Execute a CWL tool or workflow, submit work (using either jobs or
@@ -49,7 +56,7 @@ class ArvCwlRunner(object):
 
     """
 
-    def __init__(self, api_client, work_api=None, keep_client=None, output_name=None, output_tags=None):
+    def __init__(self, api_client, work_api=None, keep_client=None, output_name=None, output_tags=None, num_retries=4):
         self.api = api_client
         self.processes = {}
         self.lock = threading.Lock()
@@ -57,7 +64,7 @@ class ArvCwlRunner(object):
         self.final_output = None
         self.final_status = None
         self.uploaded = {}
-        self.num_retries = 4
+        self.num_retries = num_retries
         self.uuid = None
         self.stop_polling = threading.Event()
         self.poll_api = None
@@ -72,7 +79,9 @@ class ArvCwlRunner(object):
         else:
             self.keep_client = arvados.keep.KeepClient(api_client=self.api, num_retries=self.num_retries)
 
-        for api in ["jobs", "containers"]:
+        self.work_api = None
+        expected_api = ["jobs", "containers"]
+        for api in expected_api:
             try:
                 methods = self.api._rootDesc.get('resources')[api]['methods']
                 if ('httpMethod' in methods['create'] and
@@ -81,14 +90,18 @@ class ArvCwlRunner(object):
                     break
             except KeyError:
                 pass
+
         if not self.work_api:
             if work_api is None:
                 raise Exception("No supported APIs")
             else:
-                raise Exception("Unsupported API '%s'" % work_api)
+                raise Exception("Unsupported API '%s', expected one of %s" % (work_api, expected_api))
 
     def arv_make_tool(self, toolpath_object, **kwargs):
         kwargs["work_api"] = self.work_api
+        kwargs["fetcher_constructor"] = partial(CollectionFetcher,
+                                                api_client=self.api,
+                                                keep_client=self.keep_client)
         if "class" in toolpath_object and toolpath_object["class"] == "CommandLineTool":
             return ArvadosCommandTool(self, toolpath_object, **kwargs)
         elif "class" in toolpath_object and toolpath_object["class"] == "Workflow":
@@ -117,56 +130,67 @@ class ArvCwlRunner(object):
             uuid = event["object_uuid"]
             with self.lock:
                 j = self.processes[uuid]
-                logger.info("Job %s (%s) is Running", j.name, uuid)
+                logger.info("%s %s is Running", self.label(j), uuid)
                 j.running = True
                 j.update_pipeline_component(event["properties"]["new_attributes"])
-        elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled"):
+        elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled", "Final"):
             uuid = event["object_uuid"]
             try:
                 self.cond.acquire()
                 j = self.processes[uuid]
-                txt = self.work_api[0].upper() + self.work_api[1:-1]
-                logger.info("%s %s (%s) is %s", txt, j.name, uuid, event["properties"]["new_attributes"]["state"])
+                logger.info("%s %s is %s", self.label(j), uuid, event["properties"]["new_attributes"]["state"])
                 with Perf(metrics, "done %s" % j.name):
                     j.done(event["properties"]["new_attributes"])
                 self.cond.notify()
             finally:
                 self.cond.release()
 
+    def label(self, obj):
+        return "[%s %s]" % (self.work_api[0:-1], obj.name)
+
     def poll_states(self):
         """Poll status of jobs or containers listed in the processes dict.
 
        Runs in a separate thread.
        """
 
-        while True:
-            self.stop_polling.wait(15)
-            if self.stop_polling.is_set():
-                break
-            with self.lock:
-                keys = self.processes.keys()
-                if not keys:
-                    continue
+        try:
+            while True:
+                self.stop_polling.wait(15)
+                if self.stop_polling.is_set():
+                    break
+                with self.lock:
+                    keys = self.processes.keys()
+                    if not keys:
+                        continue
 
-            if self.work_api == "containers":
-                table = self.poll_api.containers()
-            elif self.work_api == "jobs":
-                table = self.poll_api.jobs()
+                if self.work_api == "containers":
+                    table = self.poll_api.container_requests()
+                elif self.work_api == "jobs":
+                    table = self.poll_api.jobs()
 
-            try:
-                proc_states = table.list(filters=[["uuid", "in", keys]]).execute(num_retries=self.num_retries)
-            except Exception as e:
-                logger.warn("Error checking states on API server: %s", e)
-                continue
-
-            for p in proc_states["items"]:
-                self.on_message({
-                    "object_uuid": p["uuid"],
-                    "event_type": "update",
-                    "properties": {
-                        "new_attributes": p
-                    }
-                })
+                try:
+                    proc_states = table.list(filters=[["uuid", "in", keys]]).execute(num_retries=self.num_retries)
+                except Exception as e:
+                    logger.warn("Error checking states on API server: %s", e)
+                    continue
+
+                for p in proc_states["items"]:
+                    self.on_message({
+                        "object_uuid": p["uuid"],
+                        "event_type": "update",
+                        "properties": {
+                            "new_attributes": p
+                        }
+                    })
+        except:
+            logger.error("Fatal error in state polling thread.", exc_info=(sys.exc_info()[1] if self.debug else False))
+            self.cond.acquire()
+            self.processes.clear()
+            self.cond.notify()
+            self.cond.release()
+        finally:
+            self.stop_polling.set()
 
     def get_uploaded(self):
         return self.uploaded.copy()
 
@@ -174,15 +198,30 @@ class ArvCwlRunner(object):
     def add_uploaded(self, src, pair):
         self.uploaded[src] = pair
 
-    def check_writable(self, obj):
+    def check_features(self, obj):
         if isinstance(obj, dict):
+            if obj.get("class") == "InitialWorkDirRequirement":
+                if self.work_api == "containers":
+                    raise UnsupportedRequirement("InitialWorkDirRequirement not supported with --api=containers")
             if obj.get("writable"):
-                raise UnsupportedRequirement("InitialWorkDir feature 'writable: true' not supported")
+                raise SourceLine(obj, "writable", UnsupportedRequirement).makeError("InitialWorkDir feature 'writable: true' not supported")
+            if obj.get("class") == "CommandLineTool":
+                if self.work_api == "containers":
+                    if obj.get("stdin"):
+                        raise SourceLine(obj, "stdin", UnsupportedRequirement).makeError("Stdin redirection currently not supported with --api=containers")
+                    if obj.get("stderr"):
+                        raise SourceLine(obj, "stderr", UnsupportedRequirement).makeError("Stderr redirection currently not supported with --api=containers")
"stderr", UnsupportedRequirement).makeError("Stderr redirection currently not suppported with --api=containers") + if obj.get("class") == "DockerRequirement": + if obj.get("dockerOutputDirectory"): + # TODO: can be supported by containers API, but not jobs API. + raise SourceLine(obj, "dockerOutputDirectory", UnsupportedRequirement).makeError( + "Option 'dockerOutputDirectory' of DockerRequirement not supported.") for v in obj.itervalues(): - self.check_writable(v) - if isinstance(obj, list): - for v in obj: - self.check_writable(v) + self.check_features(v) + elif isinstance(obj, list): + for i,v in enumerate(obj): + with SourceLine(obj, i, UnsupportedRequirement): + self.check_features(v) def make_output_collection(self, name, tagsString, outputObj): outputObj = copy.deepcopy(outputObj) @@ -268,6 +307,12 @@ class ArvCwlRunner(object): if self.work_api == "containers": try: current = self.api.containers().current().execute(num_retries=self.num_retries) + except ApiError as e: + # Status code 404 just means we're not running in a container. + if e.resp.status != 404: + logger.info("Getting current container: %s", e) + return + try: self.api.containers().update(uuid=current['uuid'], body={ 'output': self.final_output_collection.portable_data_hash(), @@ -285,7 +330,7 @@ class ArvCwlRunner(object): def arv_executor(self, tool, job_order, **kwargs): self.debug = kwargs.get("debug") - tool.visit(self.check_writable) + tool.visit(self.check_features) self.project_uuid = kwargs.get("project_uuid") self.pipeline = None @@ -294,19 +339,44 @@ class ArvCwlRunner(object): keep_client=self.keep_client) self.fs_access = make_fs_access(kwargs["basedir"]) + if not kwargs.get("name"): + kwargs["name"] = self.name = tool.tool.get("label") or tool.metadata.get("label") or os.path.basename(tool.tool["id"]) + + # Upload direct dependencies of workflow steps, get back mapping of files to keep references. + # Also uploads docker images. + upload_workflow_deps(self, tool) + + # Reload tool object which may have been updated by + # upload_workflow_deps + tool = self.arv_make_tool(tool.doc_loader.idx[tool.tool["id"]], + makeTool=self.arv_make_tool, + loader=tool.doc_loader, + avsc_names=tool.doc_schema, + metadata=tool.metadata) + + # Upload local file references in the job order. + job_order = upload_job_order(self, "%s input" % kwargs["name"], + tool, job_order) + existing_uuid = kwargs.get("update_workflow") if existing_uuid or kwargs.get("create_workflow"): + # Create a pipeline template or workflow record and exit. if self.work_api == "jobs": tmpl = RunnerTemplate(self, tool, job_order, kwargs.get("enable_reuse"), - uuid=existing_uuid) + uuid=existing_uuid, + submit_runner_ram=kwargs.get("submit_runner_ram"), + name=kwargs["name"]) tmpl.save() # cwltool.main will write our return value to stdout. 
-                return tmpl.uuid
-        else:
-            return upload_workflow(self, tool, job_order,
-                                   self.project_uuid,
-                                   uuid=existing_uuid)
+            return (tmpl.uuid, "success")
+        elif self.work_api == "containers":
+            return (upload_workflow(self, tool, job_order,
+                                    self.project_uuid,
+                                    uuid=existing_uuid,
+                                    submit_runner_ram=kwargs.get("submit_runner_ram"),
+                                    name=kwargs["name"]),
+                    "success")
 
         self.ignore_docker_for_reuse = kwargs.get("ignore_docker_for_reuse")
 
@@ -314,7 +384,6 @@ class ArvCwlRunner(object):
         kwargs["enable_reuse"] = kwargs.get("enable_reuse")
         kwargs["use_container"] = True
         kwargs["tmpdir_prefix"] = "tmp"
-        kwargs["on_error"] = "continue"
         kwargs["compute_checksum"] = kwargs.get("compute_checksum")
 
         if self.work_api == "containers":
@@ -327,33 +396,51 @@ class ArvCwlRunner(object):
             kwargs["docker_outdir"] = "$(task.outdir)"
             kwargs["tmpdir"] = "$(task.tmpdir)"
 
-        upload_instance(self, shortname(tool.tool["id"]), tool, job_order)
 
         runnerjob = None
         if kwargs.get("submit"):
+            # Submit a runner job to run the workflow for us.
             if self.work_api == "containers":
                 if tool.tool["class"] == "CommandLineTool":
+                    kwargs["runnerjob"] = tool.tool["id"]
+                    upload_dependencies(self,
+                                        kwargs["name"],
+                                        tool.doc_loader,
+                                        tool.tool,
+                                        tool.tool["id"],
+                                        False)
                     runnerjob = tool.job(job_order,
                                          self.output_callback,
                                          **kwargs).next()
                 else:
-                    runnerjob = RunnerContainer(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name, self.output_tags)
-        else:
-            runnerjob = RunnerJob(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name, self.output_tags)
-
-        if not kwargs.get("submit") and "cwl_runner_job" not in kwargs and not self.work_api == "containers":
+                    runnerjob = RunnerContainer(self, tool, job_order, kwargs.get("enable_reuse"),
+                                                self.output_name,
+                                                self.output_tags,
+                                                submit_runner_ram=kwargs.get("submit_runner_ram"),
+                                                name=kwargs.get("name"),
+                                                on_error=kwargs.get("on_error"),
+                                                submit_runner_image=kwargs.get("submit_runner_image"))
+            elif self.work_api == "jobs":
+                runnerjob = RunnerJob(self, tool, job_order, kwargs.get("enable_reuse"),
+                                      self.output_name,
+                                      self.output_tags,
+                                      submit_runner_ram=kwargs.get("submit_runner_ram"),
+                                      name=kwargs.get("name"),
+                                      on_error=kwargs.get("on_error"),
+                                      submit_runner_image=kwargs.get("submit_runner_image"))
+
+        if not kwargs.get("submit") and "cwl_runner_job" not in kwargs and self.work_api == "jobs":
             # Create pipeline for local run
             self.pipeline = self.api.pipeline_instances().create(
                 body={
                     "owner_uuid": self.project_uuid,
-                    "name": shortname(tool.tool["id"]),
+                    "name": kwargs["name"] if kwargs.get("name") else shortname(tool.tool["id"]),
                     "components": {},
                     "state": "RunningOnClient"}).execute(num_retries=self.num_retries)
             logger.info("Pipeline instance %s", self.pipeline["uuid"])
 
         if runnerjob and not kwargs.get("wait"):
             runnerjob.run(wait=kwargs.get("wait"))
-            return runnerjob.uuid
+            return (runnerjob.uuid, "success")
 
         self.poll_api = arvados.api('v1')
         self.polling_thread = threading.Thread(target=self.poll_states)
@@ -378,6 +465,10 @@ class ArvCwlRunner(object):
             loopperf.__enter__()
             for runnable in jobiter:
                 loopperf.__exit__()
+
+                if self.stop_polling.is_set():
+                    break
+
                 if runnable:
                     with Perf(metrics, "run"):
                         runnable.run(**kwargs)
@@ -399,7 +490,7 @@ class ArvCwlRunner(object):
             if sys.exc_info()[0] is KeyboardInterrupt:
                 logger.error("Interrupted, marking pipeline as failed")
             else:
-                logger.error("Caught unhandled exception, marking pipeline as failed. Error was: %s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
Error was: %s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False)) + logger.error("Execution failed: %s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False)) if self.pipeline: self.api.pipeline_instances().update(uuid=self.pipeline["uuid"], body={"state": "Failed"}).execute(num_retries=self.num_retries) @@ -427,14 +518,11 @@ class ArvCwlRunner(object): self.final_output, self.final_output_collection = self.make_output_collection(self.output_name, self.output_tags, self.final_output) self.set_crunch_output() - if self.final_status != "success": - raise WorkflowException("Workflow failed.") - if kwargs.get("compute_checksum"): adjustDirObjs(self.final_output, partial(getListing, self.fs_access)) adjustFileObjs(self.final_output, partial(compute_checksums, self.fs_access)) - return self.final_output + return (self.final_output, self.final_status) def versionstring(): @@ -503,6 +591,12 @@ def arg_parser(): # type: () -> argparse.ArgumentParser exgroup.add_argument("--no-wait", action="store_false", help="Submit workflow runner job and exit.", default=True, dest="wait") + exgroup = parser.add_mutually_exclusive_group() + exgroup.add_argument("--log-timestamps", action="store_true", help="Prefix logging lines with timestamp", + default=True, dest="log_timestamps") + exgroup.add_argument("--no-log-timestamps", action="store_false", help="No timestamp on logging lines", + default=True, dest="log_timestamps") + parser.add_argument("--api", type=str, default=None, dest="work_api", help="Select work submission API, one of 'jobs' or 'containers'. Default is 'jobs' if that API is available, otherwise 'containers'.") @@ -511,6 +605,22 @@ def arg_parser(): # type: () -> argparse.ArgumentParser help="Compute checksum of contents while collecting outputs", dest="compute_checksum") + parser.add_argument("--submit-runner-ram", type=int, + help="RAM (in MiB) required for the workflow runner job (default 1024)", + default=1024) + + parser.add_argument("--submit-runner-image", type=str, + help="Docker image for workflow runner job, default arvados/jobs:%s" % __version__, + default=None) + + parser.add_argument("--name", type=str, + help="Name to use for workflow execution instance.", + default=None) + + parser.add_argument("--on-error", type=str, + help="Desired workflow behavior when a step fails. One of 'stop' or 'continue'. 
" + "Default is 'continue'.", default="continue", choices=("stop", "continue")) + parser.add_argument("workflow", type=str, nargs="?", default=None, help="The workflow to execute") parser.add_argument("job_order", nargs=argparse.REMAINDER, help="The input object to the workflow.") @@ -534,6 +644,10 @@ def main(args, stdout, stderr, api_client=None, keep_client=None): job_order_object = None arvargs = parser.parse_args(args) + if arvargs.version: + print versionstring() + return + if arvargs.update_workflow: if arvargs.update_workflow.find('-7fd4e-') == 5: want_api = 'containers' @@ -555,24 +669,39 @@ def main(args, stdout, stderr, api_client=None, keep_client=None): try: if api_client is None: api_client=arvados.api('v1', model=OrderedJsonModel()) - runner = ArvCwlRunner(api_client, work_api=arvargs.work_api, keep_client=keep_client, output_name=arvargs.output_name, output_tags=arvargs.output_tags) + if keep_client is None: + keep_client = arvados.keep.KeepClient(api_client=api_client, num_retries=4) + runner = ArvCwlRunner(api_client, work_api=arvargs.work_api, keep_client=keep_client, + num_retries=4, output_name=arvargs.output_name, + output_tags=arvargs.output_tags) except Exception as e: logger.error(e) return 1 if arvargs.debug: logger.setLevel(logging.DEBUG) + logging.getLogger('arvados').setLevel(logging.DEBUG) if arvargs.quiet: logger.setLevel(logging.WARN) + logging.getLogger('arvados').setLevel(logging.WARN) logging.getLogger('arvados.arv-run').setLevel(logging.WARN) if arvargs.metrics: metrics.setLevel(logging.DEBUG) logging.getLogger("cwltool.metrics").setLevel(logging.DEBUG) + if arvargs.log_timestamps: + arvados.log_handler.setFormatter(logging.Formatter( + '%(asctime)s %(name)s %(levelname)s: %(message)s', + '%Y-%m-%d %H:%M:%S')) + else: + arvados.log_handler.setFormatter(logging.Formatter('%(name)s %(levelname)s: %(message)s')) + arvargs.conformance_test = None arvargs.use_container = True + arvargs.relax_path_checks = True + arvargs.validate = None return cwltool.main.main(args=arvargs, stdout=stdout, @@ -581,4 +710,12 @@ def main(args, stdout, stderr, api_client=None, keep_client=None): makeTool=runner.arv_make_tool, versionfunc=versionstring, job_order_object=job_order_object, - make_fs_access=partial(CollectionFsAccess, api_client=api_client)) + make_fs_access=partial(CollectionFsAccess, + api_client=api_client, + keep_client=keep_client), + fetcher_constructor=partial(CollectionFetcher, + api_client=api_client, + keep_client=keep_client, + num_retries=runner.num_retries), + resolver=partial(collectionResolver, api_client, num_retries=runner.num_retries), + logger_handler=arvados.log_handler)