+import logging
+import json
+import os
+
+from cwltool.errors import WorkflowException
+from cwltool.process import get_feature, adjustFiles, UnsupportedRequirement, shortname
+
+import arvados.collection
+
+from .arvdocker import arv_docker_get_image
+from . import done
+from .runner import Runner
+
+logger = logging.getLogger('arvados.cwl-runner')
+
class ArvadosContainer(object):
"""Submit and manage a Crunch job for executing a CWL CommandLineTool."""
self.arvrunner = runner
self.running = False
+ def update_pipeline_component(self, r):
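+        # No-op: pipeline instance components are a jobs-API concept and do
+        # not apply to container requests.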
+ pass
+
def run(self, dry_run=False, pull_image=True, **kwargs):
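+        # Build the container request body.  Submitting it already in the
+        # "Committed" state makes it eligible for scheduling immediately.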
container_request = {
- "command": self.command_line
+ "command": self.command_line,
"owner_uuid": self.arvrunner.project_uuid,
"name": self.name,
- "output_path", "/var/spool/cwl",
- "cwd", "/var/spool/cwl",
- "priority": 1
+ "output_path": "/var/spool/cwl",
+ "cwd": "/var/spool/cwl",
+ "priority": 1,
+ "state": "Committed"
}
runtime_constraints = {}
- mounts = {}
+ mounts = {
+ "/var/spool/cwl": {
+ "kind": "tmp"
+ }
+ }
+
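+        # Mount each input file's collection into the container.  The mapped
+        # path has the form "/keep/<pdh>/<file>", so p[6:] strips the
+        # six-character "/keep/" prefix to get the portable data hash reference.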
+ for f in self.pathmapper.files():
+ _, p = self.pathmapper.mapper(f)
+ mounts[p] = {
+ "kind": "collection",
+ "portable_data_hash": p[6:]
+ }
if self.generatefiles:
- vwd = arvados.collection.Collection()
+ raise UnsupportedRequirement("Generate files not supported")
+
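+            # NOTE: everything below in this block is currently unreachable;
+            # it sketches the intended writable working directory (vwd)
+            # handling for generated files.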
+            vwd = arvados.collection.Collection(api_client=self.arvrunner.api)
container_request["task.vwd"] = {}
for t in self.generatefiles:
if isinstance(self.generatefiles[t], dict):
if self.environment:
container_request["environment"].update(self.environment)
- # TODO, not supported
- #if self.stdin:
- # container_request["task.stdin"] = self.pathmapper.mapper(self.stdin)[1]
+ if self.stdin:
+            raise UnsupportedRequirement("Stdin redirection currently not supported")
if self.stdout:
mounts["stdout"] = {"kind": "file",
- "path": self.stdout}
+ "path": "/var/spool/cwl/%s" % (self.stdout)}
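+        # Use the tool's DockerRequirement if present, otherwise fall back to
+        # the stock arvados/jobs image.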
(docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
if not docker_req:
- docker_req = "arvados/jobs"
+ docker_req = {"dockerImageId": "arvados/jobs"}
container_request["container_image"] = arv_docker_get_image(self.arvrunner.api,
docker_req,
body=container_request
).execute(num_retries=self.arvrunner.num_retries)
- self.arvrunner.jobs[response["uuid"]] = self
+ self.arvrunner.jobs[response["container_uuid"]] = self
- logger.info("Container %s (%s) is %s", self.name, response["uuid"], response["state"])
+ logger.info("Container %s (%s) request state is %s", self.name, response["container_uuid"], response["state"])
- if response["state"] in ("Complete", "Cancelled"):
+ if response["state"] == "Final":
                self.done(response)
except Exception as e:
logger.error("Got error %s" % str(e))
    def done(self, record):
        # Simplified status mapping: a cleanly finished container counts as
        # success, anything else as a permanent failure.
        if record["state"] == "Complete" and record.get("exit_code") == 0:
            processStatus = "success"
        else:
            processStatus = "permanentFail"

        try:
outputs = {}
if record["output"]:
- logc = arvados.collection.Collection(record["log"])
- log = logc.open(logc.keys()[0])
- tmpdir = None
- outdir = None
- keepdir = None
- for l in log:
- # Determine the tmpdir, outdir and keepdir paths from
- # the job run. Unfortunately, we can't take the first
- # values we find (which are expected to be near the
- # top) and stop scanning because if the node fails and
- # the job restarts on a different node these values
- # will different runs, and we need to know about the
- # final run that actually produced output.
-
- g = tmpdirre.match(l)
- if g:
- tmpdir = g.group(1)
- g = outdirre.match(l)
- if g:
- outdir = g.group(1)
- g = keepre.match(l)
- if g:
- keepdir = g.group(1)
-
- colname = "Output %s of %s" % (record["output"][0:7], self.name)
-
- # check if collection already exists with same owner, name and content
- collection_exists = self.arvrunner.api.collections().list(
- filters=[["owner_uuid", "=", self.arvrunner.project_uuid],
- ['portable_data_hash', '=', record["output"]],
- ["name", "=", colname]]
- ).execute(num_retries=self.arvrunner.num_retries)
-
- if not collection_exists["items"]:
- # Create a collection located in the same project as the
- # pipeline with the contents of the output.
- # First, get output record.
- collections = self.arvrunner.api.collections().list(
- limit=1,
- filters=[['portable_data_hash', '=', record["output"]]],
- select=["manifest_text"]
- ).execute(num_retries=self.arvrunner.num_retries)
-
- if not collections["items"]:
- raise WorkflowException(
- "Job output '%s' cannot be found on API server" % (
- record["output"]))
-
- # Create new collection in the parent project
- # with the output contents.
- self.arvrunner.api.collections().create(body={
- "owner_uuid": self.arvrunner.project_uuid,
- "name": colname,
- "portable_data_hash": record["output"],
- "manifest_text": collections["items"][0]["manifest_text"]
- }, ensure_unique_name=True).execute(
- num_retries=self.arvrunner.num_retries)
-
- self.builder.outdir = outdir
- self.builder.pathmapper.keepdir = keepdir
- outputs = self.collect_outputs("keep:" + record["output"])
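+                # With containers the tmp/out/keep mount points are fixed
+                # ("/tmp", "/var/spool/cwl", "/keep"), so done.done() can
+                # collect outputs directly instead of scanning the job log.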
+ outputs = done.done(self, record, "/tmp", "/var/spool/cwl", "/keep")
except WorkflowException as e:
- logger.error("Error while collecting job outputs:\n%s", e, exc_info=(e if self.arvrunner.debug else False))
+ logger.error("Error while collecting container outputs:\n%s", e, exc_info=(e if self.arvrunner.debug else False))
processStatus = "permanentFail"
except Exception as e:
logger.exception("Got unknown exception while collecting job outputs:")
        finally:
            # Always report the result and drop the bookkeeping entry, even if
            # output collection failed.
            self.output_callback(outputs, processStatus)
            del self.arvrunner.jobs[record["uuid"]]
+
+
+class RunnerContainer(Runner):
+ """Submit and manage a container that runs arvados-cwl-runner."""
+
+ def arvados_job_spec(self, dry_run=False, pull_image=True, **kwargs):
+ """Create an Arvados job specification for this workflow.
+
+ The returned dict can be used to create a job (i.e., passed as
+ the +body+ argument to jobs().create()), or as a component in
+ a pipeline template or pipeline instance.
+ """
+
+ workflowmapper = super(RunnerContainer, self).arvados_job_spec(dry_run=dry_run, pull_image=pull_image, **kwargs)
+
+ with arvados.collection.Collection(api_client=self.arvrunner.api) as jobobj:
+ with jobobj.open("cwl.input.json", "w") as f:
+ json.dump(self.job_order, f, sort_keys=True, indent=4)
+ jobobj.save_new(owner_uuid=self.arvrunner.project_uuid)
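+            # The collection's portable data hash is used below to mount
+            # cwl.input.json into the runner container.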
+
+ workflowname = os.path.basename(self.tool.tool["id"])
+ workflowpath = "/var/lib/cwl/workflow/%s" % workflowname
+ workflowcollection = workflowmapper.mapper(self.tool.tool["id"])[1]
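+        # workflowmapper yields a "keep:<pdh>/<filename>" reference; slice out
+        # the portable data hash between "keep:" (5 characters) and the first "/".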
+ workflowcollection = workflowcollection[5:workflowcollection.index('/')]
+ jobpath = "/var/lib/cwl/job/cwl.input.json"
+
+ container_image = arv_docker_get_image(self.arvrunner.api,
+ {"dockerImageId": "arvados/jobs"},
+ pull_image,
+ self.arvrunner.project_uuid)
+
+ return {
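+            # The container runs arvados-cwl-runner against the mounted
+            # workflow and job order; cwl.output.json is captured via the
+            # stdout mount below.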
+ "command": ["arvados-cwl-runner", "--local", "--crunch2", workflowpath, jobpath],
+ "owner_uuid": self.arvrunner.project_uuid,
+ "name": self.name,
+ "output_path": "/var/spool/cwl",
+ "cwd": "/var/spool/cwl",
+ "priority": 1,
+ "state": "Committed",
+ "container_image": container_image,
+ "mounts": {
+ workflowpath: {
+ "kind": "collection",
+ "portable_data_hash": "%s" % workflowcollection
+ },
+ jobpath: {
+ "kind": "collection",
+ "portable_data_hash": "%s/cwl.input.json" % jobobj.portable_data_hash()
+ },
+ "stdout": {
+ "kind": "file",
+ "path": "/var/spool/cwl/cwl.output.json"
+ }
+ },
+ "runtime_constraints": {
+ "vcpus": 1,
+ "ram": 1024*1024*256
+ }
+ }
+
+ def run(self, *args, **kwargs):
+ job_spec = self.arvados_job_spec(*args, **kwargs)
+ job_spec.setdefault("owner_uuid", self.arvrunner.project_uuid)
+
+ response = self.arvrunner.api.container_requests().create(
+ body=job_spec
+ ).execute(num_retries=self.arvrunner.num_retries)
+
+ self.uuid = response["uuid"]
+ self.arvrunner.jobs[response["container_uuid"]] = self
+
+ logger.info("Submitted container %s", response["uuid"])
+
+ if response["state"] in ("Complete", "Failed", "Cancelled"):
+ self.done(response)