X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/78af1220d9e2ddf4d933d9a9487397414d8a3909..e6d8eb6d2415f15439ab6b7ab715ca962e7e7763:/sdk/cwl/arvados_cwl/__init__.py

diff --git a/sdk/cwl/arvados_cwl/__init__.py b/sdk/cwl/arvados_cwl/__init__.py
index 4198c34482..ca807951bd 100644
--- a/sdk/cwl/arvados_cwl/__init__.py
+++ b/sdk/cwl/arvados_cwl/__init__.py
@@ -5,21 +5,35 @@ import arvados
 import arvados.events
 import arvados.commands.keepdocker
 import arvados.commands.run
+import arvados.collection
+import arvados.util
 import cwltool.draft2tool
 import cwltool.workflow
 import cwltool.main
+from cwltool.process import shortname
 import threading
 import cwltool.docker
 import fnmatch
 import logging
 import re
 import os
+import sys
 
 from cwltool.process import get_feature
+from arvados.api import OrderedJsonModel
 
 logger = logging.getLogger('arvados.cwl-runner')
 logger.setLevel(logging.INFO)
 
+crunchrunner_pdh = "83db29f08544e1c319572a6bd971088a+140"
+crunchrunner_download = "https://cloud.curoverse.com/collections/download/qr1hi-4zz18-n3m1yxd0vx78jic/1i1u2qtq66k1atziv4ocfgsg5nu5tj11n4r6e0bhvjg03rix4m/crunchrunner"
+certs_download = "https://cloud.curoverse.com/collections/download/qr1hi-4zz18-n3m1yxd0vx78jic/1i1u2qtq66k1atziv4ocfgsg5nu5tj11n4r6e0bhvjg03rix4m/ca-certificates.crt"
+
+tmpdirre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.tmpdir\)=(.*)")
+outdirre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.outdir\)=(.*)")
+keepre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.keep\)=(.*)")
+
+
 def arv_docker_get_image(api_client, dockerRequirement, pull_image):
     if "dockerImageId" not in dockerRequirement and "dockerPull" in dockerRequirement:
         dockerRequirement["dockerImageId"] = dockerRequirement["dockerPull"]
@@ -37,6 +51,7 @@ def arv_docker_get_image(api_client, dockerRequirement, pull_image):
         args = [image_name]
         if image_tag:
             args.append(image_tag)
+        logger.info("Uploading Docker image %s", ":".join(args))
         arvados.commands.keepdocker.main(args)
 
     return dockerRequirement["dockerImageId"]
@@ -137,18 +152,37 @@ class ArvadosJob(object):
         if docker_req and kwargs.get("use_container") is not False:
             runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api, docker_req, pull_image)
 
+        resources = self.builder.resources
+        if resources is not None:
+            if "coresMin" in resources.keys():
+                try:
+                    runtime_constraints["min_cores_per_node"] = int(resources["coresMin"])
+                except:
+                    runtime_constraints["min_cores_per_node"] = None
+            if "ramMin" in resources.keys():
+                try:
+                    runtime_constraints["min_ram_mb_per_node"] = int(resources["ramMin"])
+                except:
+                    runtime_constraints["min_ram_mb_per_node"] = None
+
         try:
             response = self.arvrunner.api.jobs().create(body={
                 "script": "crunchrunner",
-                "repository": kwargs["repository"],
-                "script_version": "master",
-                "script_parameters": {"tasks": [script_parameters]},
+                "repository": "arvados",
+                "script_version": "8488-cwl-crunchrunner-collection",
+                "script_parameters": {"tasks": [script_parameters], "crunchrunner": crunchrunner_pdh+"/crunchrunner"},
                 "runtime_constraints": runtime_constraints
-            }, find_or_create=kwargs.get("enable_reuse", True)).execute()
+            }, find_or_create=kwargs.get("enable_reuse", True)).execute(num_retries=self.arvrunner.num_retries)
 
             self.arvrunner.jobs[response["uuid"]] = self
 
-            logger.info("Job %s is %s", response["uuid"], response["state"])
+            self.arvrunner.pipeline["components"][self.name] = {"job": response}
+            self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(uuid=self.arvrunner.pipeline["uuid"],
+                                                                                     body={
+                                                                                         "components": self.arvrunner.pipeline["components"]
+                                                                                     }).execute(num_retries=self.arvrunner.num_retries)
+
+            logger.info("Job %s (%s) is %s", self.name, response["uuid"], response["state"])
 
             if response["state"] in ("Complete", "Failed", "Cancelled"):
                 self.done(response)
@@ -156,8 +190,19 @@ class ArvadosJob(object):
             logger.error("Got error %s" % str(e))
             self.output_callback({}, "permanentFail")
 
+    def update_pipeline_component(self, record):
+        self.arvrunner.pipeline["components"][self.name] = {"job": record}
+        self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(uuid=self.arvrunner.pipeline["uuid"],
+                                                                                 body={
+                                                                                     "components": self.arvrunner.pipeline["components"]
+                                                                                 }).execute(num_retries=self.arvrunner.num_retries)
 
     def done(self, record):
+        try:
+            self.update_pipeline_component(record)
+        except:
+            pass
+
         try:
             if record["state"] == "Complete":
                 processStatus = "success"
@@ -166,7 +211,28 @@ class ArvadosJob(object):
 
             try:
                 outputs = {}
-                outputs = self.collect_outputs("keep:" + record["output"])
+                if record["output"]:
+                    logc = arvados.collection.Collection(record["log"])
+                    log = logc.open(logc.keys()[0])
+                    tmpdir = None
+                    outdir = None
+                    keepdir = None
+                    for l in log.readlines():
+                        g = tmpdirre.match(l)
+                        if g:
+                            tmpdir = g.group(1)
+                        g = outdirre.match(l)
+                        if g:
+                            outdir = g.group(1)
+                        g = keepre.match(l)
+                        if g:
+                            keepdir = g.group(1)
+                        if tmpdir and outdir and keepdir:
+                            break
+
+                    self.builder.outdir = outdir
+                    self.builder.pathmapper.keepdir = keepdir
+                    outputs = self.collect_outputs("keep:" + record["output"])
             except Exception as e:
                 logger.exception("Got exception while collecting job outputs:")
                 processStatus = "permanentFail"
@@ -188,7 +254,7 @@ class ArvPathMapper(cwltool.pathmapper.PathMapper):
                 self._pathmap[src] = (src, "$(task.keep)/%s" % src[5:])
             if src not in self._pathmap:
                 ab = cwltool.pathmapper.abspath(src, basedir)
-                st = arvados.commands.run.statfile("", ab)
+                st = arvados.commands.run.statfile("", ab, fnPattern="$(task.keep)/%s/%s")
                 if kwargs.get("conformance_test"):
                     self._pathmap[src] = (src, ab)
                 elif isinstance(st, arvados.commands.run.UploadFile):
@@ -209,11 +275,20 @@ class ArvPathMapper(cwltool.pathmapper.PathMapper):
                 arvrunner.add_uploaded(src, (ab, st.fn))
                 self._pathmap[src] = (ab, st.fn)
 
+        self.keepdir = None
+
+    def reversemap(self, target):
+        if target.startswith("keep:"):
+            return target
+        elif self.keepdir and target.startswith(self.keepdir):
+            return "keep:" + target[len(self.keepdir)+1:]
+        else:
+            return super(ArvPathMapper, self).reversemap(target)
 
 
 class ArvadosCommandTool(cwltool.draft2tool.CommandLineTool):
     def __init__(self, arvrunner, toolpath_object, **kwargs):
-        super(ArvadosCommandTool, self).__init__(toolpath_object, outdir="$(task.outdir)", tmpdir="$(task.tmpdir)", **kwargs)
+        super(ArvadosCommandTool, self).__init__(toolpath_object, **kwargs)
         self.arvrunner = arvrunner
 
     def makeJobRunner(self):
@@ -231,6 +306,7 @@ class ArvCwlRunner(object):
         self.cond = threading.Condition(self.lock)
         self.final_output = None
         self.uploaded = {}
+        self.num_retries = 4
 
     def arvMakeTool(self, toolpath_object, **kwargs):
         if "class" in toolpath_object and toolpath_object["class"] == "CommandLineTool":
@@ -241,22 +317,33 @@
     def output_callback(self, out, processStatus):
         if processStatus == "success":
             logger.info("Overall job status is %s", processStatus)
+            self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
"Complete"}).execute(num_retries=self.num_retries) + else: logger.warn("Overall job status is %s", processStatus) + self.api.pipeline_instances().update(uuid=self.pipeline["uuid"], + body={"state": "Failed"}).execute(num_retries=self.num_retries) self.final_output = out + def on_message(self, event): if "object_uuid" in event: if event["object_uuid"] in self.jobs and event["event_type"] == "update": if event["properties"]["new_attributes"]["state"] == "Running" and self.jobs[event["object_uuid"]].running is False: - logger.info("Job %s is Running", event["object_uuid"]) + uuid = event["object_uuid"] with self.lock: - self.jobs[event["object_uuid"]].running = True + j = self.jobs[uuid] + logger.info("Job %s (%s) is Running", j.name, uuid) + j.running = True + j.update_pipeline_component(event["properties"]["new_attributes"]) elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled"): - logger.info("Job %s is %s", event["object_uuid"], event["properties"]["new_attributes"]["state"]) + uuid = event["object_uuid"] try: self.cond.acquire() - self.jobs[event["object_uuid"]].done(event["properties"]["new_attributes"]) + j = self.jobs[uuid] + logger.info("Job %s (%s) is %s", j.name, uuid, event["properties"]["new_attributes"]["state"]) + j.done(event["properties"]["new_attributes"]) self.cond.notify() finally: self.cond.release() @@ -270,52 +357,84 @@ class ArvCwlRunner(object): def arvExecutor(self, tool, job_order, input_basedir, args, **kwargs): events = arvados.events.subscribe(arvados.api('v1'), [["object_uuid", "is_a", "arvados#job"]], self.on_message) + try: + self.api.collections().get(uuid=crunchrunner_pdh).execute() + except arvados.errors.ApiError as e: + import httplib2 + h = httplib2.Http(ca_certs=arvados.util.ca_certs_path()) + resp, content = h.request(crunchrunner_download, "GET") + resp2, content2 = h.request(certs_download, "GET") + with arvados.collection.Collection() as col: + with col.open("crunchrunner", "w") as f: + f.write(content) + with col.open("ca-certificates.crt", "w") as f: + f.write(content2) + + col.save_new("crunchrunner binary", ensure_unique_name=True) + self.fs_access = CollectionFsAccess(input_basedir) kwargs["fs_access"] = self.fs_access kwargs["enable_reuse"] = args.enable_reuse - kwargs["repository"] = args.repository + + kwargs["outdir"] = "$(task.outdir)" + kwargs["tmpdir"] = "$(task.tmpdir)" if kwargs.get("conformance_test"): return cwltool.main.single_job_executor(tool, job_order, input_basedir, args, **kwargs) else: - jobiter = tool.job(job_order, - input_basedir, - self.output_callback, - **kwargs) - - for runnable in jobiter: - if runnable: - with self.lock: - runnable.run(**kwargs) - else: - if self.jobs: - try: - self.cond.acquire() - self.cond.wait() - finally: - self.cond.release() - else: - logger.error("Workflow cannot make any more progress.") - break - - while self.jobs: - try: - self.cond.acquire() - self.cond.wait() - finally: - self.cond.release() + self.pipeline = self.api.pipeline_instances().create(body={"name": shortname(tool.tool["id"]), + "components": {}, + "state": "RunningOnClient"}).execute(num_retries=self.num_retries) - events.close() + jobiter = tool.job(job_order, + input_basedir, + self.output_callback, + docker_outdir="$(task.outdir)", + **kwargs) - if self.final_output is None: - raise cwltool.workflow.WorkflowException("Workflow did not return a result.") + try: + for runnable in jobiter: + if runnable: + with self.lock: + runnable.run(**kwargs) + else: + if self.jobs: + try: + 
+                                self.cond.acquire()
+                                self.cond.wait(1)
+                            except RuntimeError:
+                                pass
+                            finally:
+                                self.cond.release()
+                        else:
+                            logger.error("Workflow cannot make any more progress.")
+                            break
+
+                while self.jobs:
+                    try:
+                        self.cond.acquire()
+                        self.cond.wait(1)
+                    except RuntimeError:
+                        pass
+                    finally:
+                        self.cond.release()
+
+                events.close()
+
+                if self.final_output is None:
+                    raise cwltool.workflow.WorkflowException("Workflow did not return a result.")
+
+            except:
+                if sys.exc_info()[0] is not KeyboardInterrupt:
+                    logger.exception("Caught unhandled exception, marking pipeline as failed")
+                    self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
+                                                         body={"state": "Failed"}).execute(num_retries=self.num_retries)
 
         return self.final_output
 
 def main(args, stdout, stderr, api_client=None):
-    runner = ArvCwlRunner(api_client=arvados.api('v1'))
-
     args.insert(0, "--leave-outputs")
     parser = cwltool.main.arg_parser()
     exgroup = parser.add_mutually_exclusive_group()
@@ -326,6 +445,10 @@ def main(args, stdout, stderr, api_client=None):
                         default=False, dest="enable_reuse",
                         help="")
-    parser.add_argument('--repository', type=str, default="peter/crunchrunner", help="Repository containing the 'crunchrunner' program.")
+    try:
+        runner = ArvCwlRunner(api_client=arvados.api('v1', model=OrderedJsonModel()))
+    except Exception as e:
+        logger.error(e)
+        return 1
 
     return cwltool.main.main(args, executor=runner.arvExecutor, makeTool=runner.arvMakeTool, parser=parser)