import json
import os
+import ruamel.yaml as yaml
+
from cwltool.errors import WorkflowException
-from cwltool.process import get_feature, adjustFiles, UnsupportedRequirement, shortname
+from cwltool.process import get_feature, UnsupportedRequirement, shortname
+from cwltool.pathmapper import adjustFiles
+from cwltool.utils import aslist
import arvados.collection
from .arvdocker import arv_docker_get_image
from . import done
-from .runner import Runner
+from .runner import Runner, arvados_jobs_image
+from .fsaccess import CollectionFetcher
logger = logging.getLogger('arvados.cwl-runner')
"command": self.command_line,
"owner_uuid": self.arvrunner.project_uuid,
"name": self.name,
- "output_path": "/var/spool/cwl",
- "cwd": "/var/spool/cwl",
+ "output_path": self.outdir,
+ "cwd": self.outdir,
"priority": 1,
- "state": "Committed"
+ "state": "Committed",
+ "properties": {}
}
runtime_constraints = {}
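+        # The container's designated output directory is mounted as writable
+        # scratch space ("tmp" kind); input collections are added to mounts below.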
mounts = {
- "/var/spool/cwl": {
+ self.outdir: {
"kind": "tmp"
}
}
+ scheduling_parameters = {}
+ dirs = set()
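+        # First pass: mount Directory inputs that refer to the root of a Keep
+        # collection (no subpath after the portable data hash), remembering those
+        # PDHs so paths they already cover can be skipped in the second pass.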
for f in self.pathmapper.files():
- _, p = self.pathmapper.mapper(f)
- mounts[p] = {
- "kind": "collection",
- "portable_data_hash": p[6:]
- }
+ _, p, tp = self.pathmapper.mapper(f)
+ if tp == "Directory" and '/' not in p[6:]:
+ mounts[p] = {
+ "kind": "collection",
+ "portable_data_hash": p[6:]
+ }
+ dirs.add(p[6:])
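+        # Second pass: mount any remaining file paths whose parent collection
+        # was not already mounted whole above.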
+ for f in self.pathmapper.files():
+ _, p, tp = self.pathmapper.mapper(f)
+ if p[6:].split("/")[0] not in dirs:
+ mounts[p] = {
+ "kind": "collection",
+ "portable_data_hash": p[6:]
+ }
- if self.generatefiles:
- raise UnsupportedRequirement("Generate files not supported")
+ if self.generatefiles["listing"]:
+ raise UnsupportedRequirement("InitialWorkDirRequirement not supported with --api=containers")
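+        # TMPDIR and HOME are pointed at the container's temporary and output
+        # directories rather than fixed host paths.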
- container_request["environment"] = {"TMPDIR": "/tmp"}
+ container_request["environment"] = {"TMPDIR": self.tmpdir, "HOME": self.outdir}
if self.environment:
container_request["environment"].update(self.environment)
if self.stdin:
raise UnsupportedRequirement("Stdin redirection currently not suppported")
+ if self.stderr:
+            raise UnsupportedRequirement("Stderr redirection currently not supported")
+
if self.stdout:
mounts["stdout"] = {"kind": "file",
- "path": "/var/spool/cwl/%s" % (self.stdout)}
+ "path": "%s/%s" % (self.outdir, self.stdout)}
(docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
if not docker_req:
- docker_req = {"dockerImageId": "arvados/jobs"}
+ docker_req = {"dockerImageId": arvados_jobs_image(self.arvrunner)}
container_request["container_image"] = arv_docker_get_image(self.arvrunner.api,
docker_req,
runtime_constraints["vcpus"] = resources.get("cores", 1)
runtime_constraints["ram"] = resources.get("ram") * 2**20
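+        # Translate Arvados-specific CWL hints (APIRequirement, RuntimeConstraints,
+        # PartitionRequirement) into container runtime constraints and scheduling
+        # parameters.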
+ api_req, _ = get_feature(self, "http://arvados.org/cwl#APIRequirement")
+ if api_req:
+ runtime_constraints["API"] = True
+
+ runtime_req, _ = get_feature(self, "http://arvados.org/cwl#RuntimeConstraints")
+ if runtime_req:
+ if "keep_cache" in runtime_req:
+ runtime_constraints["keep_cache_ram"] = runtime_req["keep_cache"]
+
+ partition_req, _ = get_feature(self, "http://arvados.org/cwl#PartitionRequirement")
+ if partition_req:
+ scheduling_parameters["partitions"] = aslist(partition_req["partition"])
+
container_request["mounts"] = mounts
container_request["runtime_constraints"] = runtime_constraints
+ container_request["use_existing"] = kwargs.get("enable_reuse", True)
+ container_request["scheduling_parameters"] = scheduling_parameters
+
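+        # If this run was submitted from a stored Arvados workflow (an "arvwf:"
+        # runner job), name the request after the workflow record and note the
+        # originating template UUID in the request properties.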
+ if kwargs.get("runnerjob", "").startswith("arvwf:"):
+ wfuuid = kwargs["runnerjob"][6:kwargs["runnerjob"].index("#")]
+ wfrecord = self.arvrunner.api.workflows().get(uuid=wfuuid).execute(num_retries=self.arvrunner.num_retries)
+ if container_request["name"] == "main":
+ container_request["name"] = wfrecord["name"]
+ container_request["properties"]["template_uuid"] = wfuuid
try:
response = self.arvrunner.api.container_requests().create(
body=container_request
).execute(num_retries=self.arvrunner.num_retries)
- self.arvrunner.processes[response["container_uuid"]] = self
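+            # Track this process by the container request UUID (not the container
+            # UUID); done() removes the entry under the same key.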
+ self.uuid = response["uuid"]
+ self.arvrunner.processes[self.uuid] = self
- logger.info("Container %s (%s) request state is %s", self.name, response["container_uuid"], response["state"])
+ logger.info("Container request %s (%s) state is %s", self.name, response["uuid"], response["state"])
if response["state"] == "Final":
self.done(response)
def done(self, record):
try:
- if record["state"] == "Complete":
- rcode = record["exit_code"]
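+            # The exit code and output live on the underlying container record,
+            # so fetch it to decide success or failure.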
+ container = self.arvrunner.api.containers().get(
+ uuid=record["container_uuid"]
+ ).execute(num_retries=self.arvrunner.num_retries)
+ if container["state"] == "Complete":
+ rcode = container["exit_code"]
if self.successCodes and rcode in self.successCodes:
processStatus = "success"
elif self.temporaryFailCodes and rcode in self.temporaryFailCodes:
else:
processStatus = "permanentFail"
- try:
- outputs = {}
- if record["output"]:
- outputs = done.done(self, record, "/tmp", "/var/spool/cwl", "/keep")
- except WorkflowException as e:
- logger.error("Error while collecting container outputs:\n%s", e, exc_info=(e if self.arvrunner.debug else False))
- processStatus = "permanentFail"
- except Exception as e:
- logger.exception("Got unknown exception while collecting job outputs:")
- processStatus = "permanentFail"
+            outputs = {}
+            if container["output"]:
+                try:
+                    outputs = done.done_outputs(self, container, "/tmp", self.outdir, "/keep")
+                except Exception as e:
+                    logger.exception("Error while collecting container outputs: %s", e)
+                    processStatus = "permanentFail"
self.output_callback(outputs, processStatus)
finally:
del self.arvrunner.processes[record["uuid"]]
workflowmapper = super(RunnerContainer, self).arvados_job_spec(dry_run=dry_run, pull_image=pull_image, **kwargs)
- with arvados.collection.Collection(api_client=self.arvrunner.api) as jobobj:
- with jobobj.open("cwl.input.json", "w") as f:
- json.dump(self.job_order, f, sort_keys=True, indent=4)
- jobobj.save_new(owner_uuid=self.arvrunner.project_uuid)
-
- workflowname = os.path.basename(self.tool.tool["id"])
- workflowpath = "/var/lib/cwl/workflow/%s" % workflowname
- workflowcollection = workflowmapper.mapper(self.tool.tool["id"])[1]
- workflowcollection = workflowcollection[5:workflowcollection.index('/')]
- jobpath = "/var/lib/cwl/job/cwl.input.json"
-
- container_image = arv_docker_get_image(self.arvrunner.api,
- {"dockerImageId": "arvados/jobs"},
- pull_image,
- self.arvrunner.project_uuid)
-
- return {
- "command": ["arvados-cwl-runner", "--local", "--api=containers", workflowpath, jobpath],
+ container_req = {
"owner_uuid": self.arvrunner.project_uuid,
"name": self.name,
"output_path": "/var/spool/cwl",
"cwd": "/var/spool/cwl",
"priority": 1,
"state": "Committed",
- "container_image": container_image,
+ "container_image": arvados_jobs_image(self.arvrunner),
"mounts": {
- "/var/lib/cwl/workflow": {
- "kind": "collection",
- "portable_data_hash": "%s" % workflowcollection
- },
- jobpath: {
- "kind": "collection",
- "portable_data_hash": "%s/cwl.input.json" % jobobj.portable_data_hash()
+ "/var/lib/cwl/cwl.input.json": {
+ "kind": "json",
+ "content": self.job_order
},
"stdout": {
"kind": "file",
},
"runtime_constraints": {
"vcpus": 1,
- "ram": 1024*1024*256,
+ "ram": 1024*1024 * self.submit_runner_ram,
"API": True
- }
+ },
+ "properties": {}
}
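+        # Mount the workflow definition: either the workflow file uploaded to a
+        # Keep collection ("keep:" reference) or, for a stored Arvados workflow
+        # ("arvwf:" reference), the workflow JSON embedded directly in the mount.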
+ workflowcollection = workflowmapper.mapper(self.tool.tool["id"])[1]
+ if workflowcollection.startswith("keep:"):
+ workflowcollection = workflowcollection[5:workflowcollection.index('/')]
+ workflowname = os.path.basename(self.tool.tool["id"])
+ workflowpath = "/var/lib/cwl/workflow/%s" % workflowname
+ container_req["mounts"]["/var/lib/cwl/workflow"] = {
+ "kind": "collection",
+ "portable_data_hash": "%s" % workflowcollection
+ }
+ elif workflowcollection.startswith("arvwf:"):
+ workflowpath = "/var/lib/cwl/workflow.json#main"
+ wfuuid = workflowcollection[6:workflowcollection.index("#")]
+ wfrecord = self.arvrunner.api.workflows().get(uuid=wfuuid).execute(num_retries=self.arvrunner.num_retries)
+ wfobj = yaml.safe_load(wfrecord["definition"])
+ if container_req["name"].startswith("arvwf:"):
+ container_req["name"] = wfrecord["name"]
+ container_req["mounts"]["/var/lib/cwl/workflow.json"] = {
+ "kind": "json",
+ "json": wfobj
+ }
+ container_req["properties"]["template_uuid"] = wfuuid
+
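+        # Assemble the arvados-cwl-runner command line that runs inside the
+        # container, forwarding output naming/tagging and reuse settings.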
+ command = ["arvados-cwl-runner", "--local", "--api=containers"]
+ if self.output_name:
+ command.append("--output-name=" + self.output_name)
+
+ if self.output_tags:
+ command.append("--output-tags=" + self.output_tags)
+
+ if self.enable_reuse:
+ command.append("--enable-reuse")
+ else:
+ command.append("--disable-reuse")
+
+ command.extend([workflowpath, "/var/lib/cwl/cwl.input.json"])
+
+ container_req["command"] = command
+
+ return container_req
+
+
def run(self, *args, **kwargs):
kwargs["keepprefix"] = "keep:"
job_spec = self.arvados_job_spec(*args, **kwargs)
).execute(num_retries=self.arvrunner.num_retries)
self.uuid = response["uuid"]
- self.arvrunner.processes[response["container_uuid"]] = self
+ self.arvrunner.processes[self.uuid] = self
logger.info("Submitted container %s", response["uuid"])
- if response["state"] in ("Complete", "Failed", "Cancelled"):
+ if response["state"] == "Final":
self.done(response)
+
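+    # On completion of the runner's container request, fetch the underlying
+    # container record and delegate to the base Runner.done() with it.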
+ def done(self, record):
+ try:
+ container = self.arvrunner.api.containers().get(
+ uuid=record["container_uuid"]
+ ).execute(num_retries=self.arvrunner.num_retries)
+        except Exception as e:
+            logger.exception("While getting runner container: %s", e)
+            self.arvrunner.output_callback({}, "permanentFail")
+ else:
+ super(RunnerContainer, self).done(container)
+ finally:
+ del self.arvrunner.processes[record["uuid"]]