import argparse
import logging
import os
import sys
import threading
+import hashlib
+from functools import partial
import pkg_resources  # part of setuptools
from cwltool.errors import WorkflowException
import cwltool.main
import cwltool.workflow
import arvados
-import arvados.events
+import arvados.config
from .arvcontainer import ArvadosContainer, RunnerContainer
from .arvjob import ArvadosJob, RunnerJob, RunnerTemplate
from .arvtool import ArvadosCommandTool
from .fsaccess import CollectionFsAccess
+from .arvworkflow import make_workflow
+from .perf import Perf
from cwltool.process import shortname, UnsupportedRequirement
+from cwltool.pathmapper import adjustFileObjs
+from cwltool.draft2tool import compute_checksums
from arvados.api import OrderedJsonModel
logger = logging.getLogger('arvados.cwl-runner')
self.num_retries = 4
self.uuid = None
self.work_api = work_api
+ self.stop_polling = threading.Event()
+ self.poll_api = None
if self.work_api is None:
    # todo: autodetect API to use.
    self.work_api = "jobs"
def arvMakeTool(self, toolpath_object, **kwargs):
if "class" in toolpath_object and toolpath_object["class"] == "CommandLineTool":
- return ArvadosCommandTool(self, toolpath_object, work_api=self.work_api, **kwargs)
+ kwargs["work_api"] = self.work_api
+ return ArvadosCommandTool(self, toolpath_object, **kwargs)
else:
return cwltool.workflow.defaultMakeTool(toolpath_object, **kwargs)
self.cond.acquire()
j = self.processes[uuid]
logger.info("Job %s (%s) is %s", j.name, uuid, event["properties"]["new_attributes"]["state"])
- j.done(event["properties"]["new_attributes"])
+ with Perf(logger, "done %s" % j.name):
+ j.done(event["properties"]["new_attributes"])
self.cond.notify()
finally:
self.cond.release()
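# Perf (imported from .perf above) times the wrapped block. A minimal
# sketch of such a timing context manager, assuming the real .perf module
# behaves similarly (hypothetical reconstruction, not the shipped code):
#
#     import time
#
#     class Perf(object):
#         def __init__(self, logger, name):
#             self.logger = logger
#             self.name = name
#         def __enter__(self):
#             self.start = time.time()
#         def __exit__(self, exc_type, exc_value, traceback):
#             self.logger.debug("%s took %.6f seconds", self.name,
#                               time.time() - self.start)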
+ def poll_states(self):
+ """Poll status of jobs or containers listed in the processes dict.
+
+ Runs in a separate thread.
+ """
+
+ while True:
+ self.stop_polling.wait(15)
+ if self.stop_polling.is_set():
+ break
+ with self.lock:
+ keys = self.processes.keys()
+ if not keys:
+ continue
+
+ if self.work_api == "containers":
+ table = self.poll_api.containers()
+ elif self.work_api == "jobs":
+ table = self.poll_api.jobs()
+
+ try:
+ proc_states = table.list(filters=[["uuid", "in", keys]]).execute(num_retries=self.num_retries)
+ except Exception as e:
+ logger.warn("Error checking states on API server: %s", e)
+ continue
+
+ for p in proc_states["items"]:
+ self.on_message({
+ "object_uuid": p["uuid"],
+ "event_type": "update",
+ "properties": {
+ "new_attributes": p
+ }
+ })
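# Lifecycle sketch for the poller (mirrors how arvExecutor drives it
# below; `runner` stands for an instance of this class):
#
#     runner.poll_api = arvados.api('v1')
#     runner.polling_thread = threading.Thread(target=runner.poll_states)
#     runner.polling_thread.start()
#     # ... submit work; on_message() fires as job/container states change ...
#     runner.stop_polling.set()
#     runner.polling_thread.join()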
+
def get_uploaded(self):
return self.uploaded.copy()
def add_uploaded(self, src, pair):
self.uploaded[src] = pair
+ def check_writable(self, obj):
+ if isinstance(obj, dict):
+ if obj.get("writable"):
+ raise UnsupportedRequirement("InitialWorkDir feature 'writable: true' not supported")
+ for v in obj.itervalues():
+ self.check_writable(v)
+ if isinstance(obj, list):
+ for v in obj:
+ self.check_writable(v)
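# For example, visiting a tool whose InitialWorkDir listing includes a file
# object like the following (location hypothetical) raises
# UnsupportedRequirement:
#
#     {"class": "File", "location": "keep:.../input.txt", "writable": True}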
+
def arvExecutor(self, tool, job_order, **kwargs):
self.debug = kwargs.get("debug")
+ tool.visit(self.check_writable)
+
if kwargs.get("quiet"):
logger.setLevel(logging.WARN)
logging.getLogger('arvados.arv-run').setLevel(logging.WARN)
+ if self.debug:
+ logger.setLevel(logging.DEBUG)
+
useruuid = self.api.users().current().execute()["uuid"]
self.project_uuid = kwargs.get("project_uuid") if kwargs.get("project_uuid") else useruuid
self.pipeline = None
+ make_fs_access = kwargs.get("make_fs_access") or partial(CollectionFsAccess, api_client=self.api)
+ self.fs_access = make_fs_access(kwargs["basedir"])
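# With the partial above, make_fs_access(kwargs["basedir"]) is equivalent
# to CollectionFsAccess(kwargs["basedir"], api_client=self.api); passing
# the factory through kwargs lets downstream code build fs access objects
# without holding its own reference to the API client.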
if kwargs.get("create_template"):
tmpl = RunnerTemplate(self, tool, job_order, kwargs.get("enable_reuse"))
# cwltool.main will write our return value to stdout.
return tmpl.uuid
- self.debug = kwargs.get("debug")
+ if kwargs.get("create_workflow") or kwargs.get("update_workflow"):
+ return make_workflow(self, tool, job_order, self.project_uuid, kwargs.get("update_workflow"))
+
self.ignore_docker_for_reuse = kwargs.get("ignore_docker_for_reuse")
- self.fs_access = CollectionFsAccess(kwargs["basedir"])
- kwargs["fs_access"] = self.fs_access
+ kwargs["make_fs_access"] = make_fs_access
kwargs["enable_reuse"] = kwargs.get("enable_reuse")
+ kwargs["use_container"] = True
+ kwargs["tmpdir_prefix"] = "tmp"
+ kwargs["on_error"] = "continue"
+ kwargs["compute_checksum"] = kwargs.get("compute_checksum")
if self.work_api == "containers":
kwargs["outdir"] = "/var/spool/cwl"
+ kwargs["docker_outdir"] = "/var/spool/cwl"
kwargs["tmpdir"] = "/tmp"
+ kwargs["docker_tmpdir"] = "/tmp"
elif self.work_api == "jobs":
kwargs["outdir"] = "$(task.outdir)"
+ kwargs["docker_outdir"] = "$(task.outdir)"
kwargs["tmpdir"] = "$(task.tmpdir)"
runnerjob = None
runnerjob.run()
return runnerjob.uuid
- if self.work_api == "containers":
- events = arvados.events.subscribe(arvados.api('v1'), [["object_uuid", "is_a", "arvados#container"]], self.on_message)
- if self.work_api == "jobs":
- events = arvados.events.subscribe(arvados.api('v1'), [["object_uuid", "is_a", "arvados#job"]], self.on_message)
+ self.poll_api = arvados.api('v1')
+ self.polling_thread = threading.Thread(target=self.poll_states)
+ self.polling_thread.start()
if runnerjob:
jobiter = iter((runnerjob,))
self.uuid = kwargs.get("cwl_runner_job").get('uuid')
jobiter = tool.job(job_order,
self.output_callback,
- docker_outdir="$(task.outdir)",
**kwargs)
try:
for runnable in jobiter:
if runnable:
- runnable.run(**kwargs)
+ with Perf(logger, "run"):
+ runnable.run(**kwargs)
else:
if self.processes:
self.cond.wait(1)
while self.processes:
self.cond.wait(1)
- events.close()
except UnsupportedRequirement:
raise
except:
body={"priority": "0"}).execute(num_retries=self.num_retries)
finally:
self.cond.release()
+ self.stop_polling.set()
+ self.polling_thread.join()
if self.final_status == "UnsupportedRequirement":
raise UnsupportedRequirement("Check log for details.")
+ if self.final_status != "success":
+ raise WorkflowException("Workflow failed.")
+
if self.final_output is None:
raise WorkflowException("Workflow did not return a result.")
+ if kwargs.get("compute_checksum"):
+ adjustFileObjs(self.final_output, partial(compute_checksums, self.fs_access))
+
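# cwltool's adjustFileObjs applies the given function to every
# {"class": "File"} entry in the output object; compute_checksums reads
# each file through fs_access and records a "checksum" field. Illustrative
# result for one output file (hash value hypothetical):
#
#     {"class": "File", "location": "keep:.../out.txt",
#      "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709"}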
return self.final_output
default=True, dest="enable_reuse",
help="")
- parser.add_argument("--project-uuid", type=str, help="Project that will own the workflow jobs, if not provided, will go to home project.")
+ parser.add_argument("--project-uuid", type=str, metavar="UUID", help="Project that will own the workflow jobs. If not provided, they go to the user's home project.")
parser.add_argument("--ignore-docker-for-reuse", action="store_true",
help="Ignore Docker image version when deciding whether to reuse past jobs.",
default=False)
exgroup.add_argument("--local", action="store_false", help="Run workflow on local host (submits jobs to Arvados).",
default=True, dest="submit")
exgroup.add_argument("--create-template", action="store_true", help="Create an Arvados pipeline template.")
+ exgroup.add_argument("--create-workflow", action="store_true", help="Create an Arvados workflow.")
+ exgroup.add_argument("--update-workflow", type=str, metavar="UUID", help="Update an existing Arvados workflow with the given UUID.")
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("--wait", action="store_true", help="After submitting workflow runner job, wait for completion.",
default=None, dest="work_api",
help="Select work submission API, one of 'jobs' or 'containers'.")
+ parser.add_argument("--compute-checksum", action="store_true", default=False,
+ help="Compute checksum of contents while collecting outputs.",
+ dest="compute_checksum")
+
parser.add_argument("workflow", type=str, nargs="?", default=None, help="The workflow to execute")
parser.add_argument("job_order", nargs=argparse.REMAINDER, help="The input object to the workflow.")
job_order_object = None
arvargs = parser.parse_args(args)
- if arvargs.create_template and not arvargs.job_order:
+ if (arvargs.create_template or arvargs.create_workflow or arvargs.update_workflow) and not arvargs.job_order:
job_order_object = ({}, "")
try:
return 1
arvargs.conformance_test = None
+ arvargs.use_container = True
return cwltool.main.main(args=arvargs,
stdout=stdout,
executor=runner.arvExecutor,
makeTool=runner.arvMakeTool,
versionfunc=versionstring,
- job_order_object=job_order_object)
+ job_order_object=job_order_object,
+ make_fs_access=partial(CollectionFsAccess, api_client=api_client))