import os
import sys
import threading
+import hashlib
+from functools import partial
import pkg_resources # part of setuptools
from cwltool.errors import WorkflowException
import cwltool.workflow
import arvados
-import arvados.events
import arvados.config
from .arvcontainer import ArvadosContainer, RunnerContainer
from .fsaccess import CollectionFsAccess
from cwltool.process import shortname, UnsupportedRequirement
+from cwltool.pathmapper import adjustFileObjs
+from cwltool.draft2tool import compute_checksums
from arvados.api import OrderedJsonModel
logger = logging.getLogger('arvados.cwl-runner')
self.num_retries = 4
self.uuid = None
self.work_api = work_api
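+        # Used to signal the polling thread to exit; the API client used for polling is created in arvExecutor.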
+ self.stop_polling = threading.Event()
+ self.poll_api = None
if self.work_api is None:
            # TODO: autodetect API to use.
def arvMakeTool(self, toolpath_object, **kwargs):
if "class" in toolpath_object and toolpath_object["class"] == "CommandLineTool":
- return ArvadosCommandTool(self, toolpath_object, work_api=self.work_api, **kwargs)
+ kwargs["work_api"] = self.work_api
+ return ArvadosCommandTool(self, toolpath_object, **kwargs)
else:
return cwltool.workflow.defaultMakeTool(toolpath_object, **kwargs)
finally:
self.cond.release()
+ def poll_states(self):
+ """Poll status of jobs or containers listed in the processes dict.
+
+ Runs in a separate thread.
+ """
+
+ while True:
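+            # Wait up to 15 seconds between polls; wait() returns immediately once stop_polling is set, so shutdown is prompt.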
+ self.stop_polling.wait(15)
+ if self.stop_polling.is_set():
+ break
+ with self.lock:
+ keys = self.processes.keys()
+ if not keys:
+ continue
+
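+            # Pick the API resource to poll based on the work submission API in use.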
+ if self.work_api == "containers":
+ table = self.poll_api.containers()
+ elif self.work_api == "jobs":
+ table = self.poll_api.jobs()
+
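+            # Fetch the current state of every tracked process in a single list request.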
+ try:
+ proc_states = table.list(filters=[["uuid", "in", keys]]).execute(num_retries=self.num_retries)
+ except Exception as e:
+ logger.warn("Error checking states on API server: %s", e)
+ continue
+
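+            # Hand each record to on_message in the same shape the websocket event handler received, so downstream state handling is unchanged.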
+ for p in proc_states["items"]:
+ self.on_message({
+ "object_uuid": p["uuid"],
+ "event_type": "update",
+ "properties": {
+ "new_attributes": p
+ }
+ })
+
def get_uploaded(self):
return self.uploaded.copy()
def add_uploaded(self, src, pair):
self.uploaded[src] = pair
+ def check_writable(self, obj):
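+        # Writable InitialWorkDir entries are not supported by the Arvados runners, so reject them before any work is submitted.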
+ if isinstance(obj, dict):
+ if obj.get("writable"):
+ raise UnsupportedRequirement("InitialWorkDir feature 'writable: true' not supported")
+ for v in obj.itervalues():
+ self.check_writable(v)
+ if isinstance(obj, list):
+ for v in obj:
+ self.check_writable(v)
+
def arvExecutor(self, tool, job_order, **kwargs):
self.debug = kwargs.get("debug")
+ tool.visit(self.check_writable)
+
if kwargs.get("quiet"):
logger.setLevel(logging.WARN)
logging.getLogger('arvados.arv-run').setLevel(logging.WARN)
useruuid = self.api.users().current().execute()["uuid"]
self.project_uuid = kwargs.get("project_uuid") if kwargs.get("project_uuid") else useruuid
self.pipeline = None
- self.fs_access = CollectionFsAccess(kwargs["basedir"], api_client=self.api)
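+        # Use the fs access factory supplied by the caller when present; otherwise default to a collection-backed CollectionFsAccess.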
+ make_fs_access = kwargs.get("make_fs_access") or partial(CollectionFsAccess, api_client=self.api)
+ self.fs_access = make_fs_access(kwargs["basedir"])
if kwargs.get("create_template"):
tmpl = RunnerTemplate(self, tool, job_order, kwargs.get("enable_reuse"))
self.debug = kwargs.get("debug")
self.ignore_docker_for_reuse = kwargs.get("ignore_docker_for_reuse")
- kwargs["fs_access"] = self.fs_access
+ kwargs["make_fs_access"] = make_fs_access
kwargs["enable_reuse"] = kwargs.get("enable_reuse")
kwargs["use_container"] = True
kwargs["tmpdir_prefix"] = "tmp"
kwargs["on_error"] = "continue"
- kwargs["compute_checksum"] = False
+ kwargs["compute_checksum"] = kwargs.get("compute_checksum")
if self.work_api == "containers":
kwargs["outdir"] = "/var/spool/cwl"
kwargs["docker_outdir"] = "/var/spool/cwl"
kwargs["tmpdir"] = "/tmp"
+ kwargs["docker_tmpdir"] = "/tmp"
elif self.work_api == "jobs":
kwargs["outdir"] = "$(task.outdir)"
kwargs["docker_outdir"] = "$(task.outdir)"
runnerjob.run()
return runnerjob.uuid
- arvados.config.settings()["ARVADOS_DISABLE_WEBSOCKETS"] = "1"
-
- if self.work_api == "containers":
- events = arvados.events.subscribe(arvados.api('v1'), [["object_uuid", "is_a", "arvados#container"]], self.on_message)
- if self.work_api == "jobs":
- events = arvados.events.subscribe(arvados.api('v1'), [["object_uuid", "is_a", "arvados#job"]], self.on_message)
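+        # Track process state by polling the API server instead of subscribing to websocket events.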
+ self.poll_api = arvados.api('v1')
+ self.polling_thread = threading.Thread(target=self.poll_states)
+ self.polling_thread.start()
if runnerjob:
jobiter = iter((runnerjob,))
while self.processes:
self.cond.wait(1)
- events.close()
except UnsupportedRequirement:
raise
except:
body={"priority": "0"}).execute(num_retries=self.num_retries)
finally:
self.cond.release()
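+            # Always shut down the polling thread, whether the run succeeded or failed.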
+ self.stop_polling.set()
+ self.polling_thread.join()
if self.final_status == "UnsupportedRequirement":
raise UnsupportedRequirement("Check log for details.")
if self.final_output is None:
raise WorkflowException("Workflow did not return a result.")
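+        # With --compute-checksum, annotate output File objects with checksums computed through the collection fs access layer.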
+ if kwargs.get("compute_checksum"):
+ adjustFileObjs(self.final_output, partial(compute_checksums, self.fs_access))
+
return self.final_output
default=None, dest="work_api",
help="Select work submission API, one of 'jobs' or 'containers'.")
+ parser.add_argument("--compute-checksum", action="store_true", default=False,
+ help="Compute checksum of contents while collecting outputs",
+ dest="compute_checksum")
+
parser.add_argument("workflow", type=str, nargs="?", default=None, help="The workflow to execute")
parser.add_argument("job_order", nargs=argparse.REMAINDER, help="The input object to the workflow.")
executor=runner.arvExecutor,
makeTool=runner.arvMakeTool,
versionfunc=versionstring,
- job_order_object=job_order_object)
+ job_order_object=job_order_object,
+ make_fs_access=partial(CollectionFsAccess, api_client=api_client))