import time
import datetime
import ciso8601
+import uuid
import ruamel.yaml as yaml
from cwltool.errors import WorkflowException
from cwltool.process import get_feature, UnsupportedRequirement, shortname
-from cwltool.pathmapper import adjustFileObjs, adjustDirObjs
+from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, visit_class
from cwltool.utils import aslist
import arvados.collection
from .arvdocker import arv_docker_get_image
from . import done
-from .runner import Runner, arvados_jobs_image, packed_workflow, trim_anonymous_location
+from .runner import Runner, arvados_jobs_image, packed_workflow, trim_anonymous_location, remove_redundant_fields
from .fsaccess import CollectionFetcher
from .pathmapper import NoFollowPathMapper, trim_listing
from .perf import Perf
pass
def run(self, dry_run=False, pull_image=True, **kwargs):
+ # ArvadosCommandTool subclasses from cwltool.CommandLineTool,
+ # which calls makeJobRunner() to get a new ArvadosContainer
+ # object. The fields that define execution such as
+ # command_line, environment, etc are set on the
+ # ArvadosContainer object by CommandLineTool.job() before
+ # run() is called.
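+        #
+        # Roughly, the hook looks like this (a sketch for orientation, not
+        # the exact implementation):
+        #
+        #     class ArvadosCommandTool(CommandLineTool):
+        #         def makeJobRunner(self, **kwargs):
+        #             return ArvadosContainer(self.arvrunner)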
+
container_request = {
"command": self.command_line,
- "owner_uuid": self.arvrunner.project_uuid,
"name": self.name,
"output_path": self.outdir,
"cwd": self.outdir,
- "priority": 1,
+ "priority": kwargs.get("priority"),
"state": "Committed",
"properties": {},
}
runtime_constraints = {}
+ if self.arvrunner.project_uuid:
+ container_request["owner_uuid"] = self.arvrunner.project_uuid
+
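+        # Secrets may only reach the container through secret_mounts (file
+        # literals); refuse to run if any secret value appears on the command
+        # line or in the environment.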
+ if self.arvrunner.secret_store.has_secret(self.command_line):
+ raise WorkflowException("Secret material leaked on command line, only file literals may contain secrets")
+
+ if self.arvrunner.secret_store.has_secret(self.environment):
+ raise WorkflowException("Secret material leaked in environment, only file literals may contain secrets")
+
resources = self.builder.resources
if resources is not None:
runtime_constraints["vcpus"] = resources.get("cores", 1)
"capacity": resources.get("tmpdirSize", 0) * 2**20
}
}
+ secret_mounts = {}
scheduling_parameters = {}
rf = [self.pathmapper.mapper(f) for f in self.pathmapper.referenced_files]
generatemapper = NoFollowPathMapper([self.generatefiles], "", "",
separateDirs=False)
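+            # Sort by mount target so a parent directory always comes before
+            # anything nested inside it; the mount-building loop below relies
+            # on this ordering to skip already-covered targets.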
+            sorteditems = sorted(generatemapper.items(), key=lambda n: n[1].target)
+
+ logger.debug("generatemapper is %s", sorteditems)
+
with Perf(metrics, "createfiles %s" % self.name):
- for f, p in generatemapper.items():
+ for f, p in sorteditems:
if not p.target:
pass
- elif p.type in ("File", "Directory"):
- source, path = self.arvrunner.fs_access.get_collection(p.resolved)
- vwd.copy(path, p.target, source_collection=source)
+ elif p.type in ("File", "Directory", "WritableFile", "WritableDirectory"):
+ if p.resolved.startswith("_:"):
+ vwd.mkdirs(p.target)
+ else:
+ source, path = self.arvrunner.fs_access.get_collection(p.resolved)
+ vwd.copy(path, p.target, source_collection=source)
elif p.type == "CreateFile":
- with vwd.open(p.target, "w") as n:
- n.write(p.resolved.encode("utf-8"))
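+                        # A file literal containing secret material is passed
+                        # to the container via secret_mounts rather than being
+                        # written into the staging collection (vwd).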
+ if self.arvrunner.secret_store.has_secret(p.resolved):
+ secret_mounts["%s/%s" % (self.outdir, p.target)] = {
+ "kind": "text",
+ "content": self.arvrunner.secret_store.retrieve(p.resolved)
+ }
+ else:
+ with vwd.open(p.target, "w") as n:
+ n.write(p.resolved.encode("utf-8"))
+
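+            # A Keep manifest cannot represent an empty directory, so drop an
+            # empty ".keep" file into any directory that would otherwise have
+            # no entries.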
+ def keepemptydirs(p):
+ if isinstance(p, arvados.collection.RichCollectionBase):
+ if len(p) == 0:
+ p.open(".keep", "w").close()
+ else:
+ for c in p:
+ keepemptydirs(p[c])
+
+ keepemptydirs(vwd)
with Perf(metrics, "generatefiles.save_new %s" % self.name):
vwd.save_new()
- for f, p in generatemapper.items():
- if not p.target:
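+            # Build one collection mount per top-level target.  Entries whose
+            # target is nested under a previously mounted directory are already
+            # covered by the parent mount, and secret file literals were
+            # diverted to secret_mounts above.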
+ prev = None
+ for f, p in sorteditems:
+ if (not p.target or self.arvrunner.secret_store.has_secret(p.resolved) or
+ (prev is not None and p.target.startswith(prev))):
continue
mountpoint = "%s/%s" % (self.outdir, p.target)
mounts[mountpoint] = {"kind": "collection",
"portable_data_hash": vwd.portable_data_hash(),
"path": p.target}
+ if p.type.startswith("Writable"):
+ mounts[mountpoint]["writable"] = True
+ prev = p.target + "/"
container_request["environment"] = {"TMPDIR": self.tmpdir, "HOME": self.outdir}
        if self.environment:
            container_request["environment"].update(self.environment)

        self.output_ttl = self.arvrunner.intermediate_output_ttl
if self.output_ttl < 0:
- raise WorkflowError("Invalid value %d for output_ttl, cannot be less than zero" % container_request["output_ttl"])
+ raise WorkflowException("Invalid value %d for output_ttl, cannot be less than zero" % container_request["output_ttl"])
container_request["output_ttl"] = self.output_ttl
container_request["mounts"] = mounts
+ container_request["secret_mounts"] = secret_mounts
container_request["runtime_constraints"] = runtime_constraints
container_request["scheduling_parameters"] = scheduling_parameters
container_request["name"] = wfrecord["name"]
container_request["properties"]["template_uuid"] = wfuuid
+ self.output_callback = self.arvrunner.get_wrapped_callback(self.output_callback)
+
try:
- response = self.arvrunner.api.container_requests().create(
- body=container_request
- ).execute(num_retries=self.arvrunner.num_retries)
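+            # If the caller supplied an existing container request UUID,
+            # update that request in place; otherwise create a new one.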
+ if kwargs.get("submit_request_uuid"):
+ response = self.arvrunner.api.container_requests().update(
+ uuid=kwargs["submit_request_uuid"],
+ body=container_request
+ ).execute(num_retries=self.arvrunner.num_retries)
+ else:
+ response = self.arvrunner.api.container_requests().create(
+ body=container_request
+ ).execute(num_retries=self.arvrunner.num_retries)
self.uuid = response["uuid"]
- self.arvrunner.processes[self.uuid] = self
+ self.arvrunner.process_submitted(self)
if response["state"] == "Final":
logger.info("%s reused container %s", self.arvrunner.label(self), response["container_uuid"])
- self.done(response)
else:
logger.info("%s %s state is %s", self.arvrunner.label(self), response["uuid"], response["state"])
except Exception as e:
processStatus = "permanentFail"
finally:
self.output_callback(outputs, processStatus)
- if record["uuid"] in self.arvrunner.processes:
- del self.arvrunner.processes[record["uuid"]]
class RunnerContainer(Runner):
"""
adjustDirObjs(self.job_order, trim_listing)
- adjustFileObjs(self.job_order, trim_anonymous_location)
- adjustDirObjs(self.job_order, trim_anonymous_location)
+ visit_class(self.job_order, ("File", "Directory"), trim_anonymous_location)
+ visit_class(self.job_order, ("File", "Directory"), remove_redundant_fields)
+
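+        # Pull secret values out of the job order: each secret parameter is
+        # staged under /secrets and replaced with a $include directive, so the
+        # secret itself never appears in the stored job order document.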
+ secret_mounts = {}
+ for param in sorted(self.job_order.keys()):
+ if self.secret_store.has_secret(self.job_order[param]):
+ mnt = "/secrets/s%d" % len(secret_mounts)
+ secret_mounts[mnt] = {
+ "kind": "text",
+ "content": self.secret_store.retrieve(self.job_order[param])
+ }
+ self.job_order[param] = {"$include": mnt}
container_req = {
- "owner_uuid": self.arvrunner.project_uuid,
"name": self.name,
"output_path": "/var/spool/cwl",
"cwd": "/var/spool/cwl",
- "priority": 1,
+ "priority": self.priority,
"state": "Committed",
"container_image": arvados_jobs_image(self.arvrunner, self.jobs_image),
"mounts": {
"writable": True
}
},
+ "secret_mounts": secret_mounts,
"runtime_constraints": {
"vcpus": 1,
"ram": 1024*1024 * self.submit_runner_ram,
"API": True
},
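+            # Honor --enable/--disable-reuse for the runner container itself.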
+ "use_existing": self.enable_reuse,
"properties": {}
}
"portable_data_hash": "%s" % workflowcollection
}
else:
- packed = packed_workflow(self.arvrunner, self.tool)
+ packed = packed_workflow(self.arvrunner, self.tool, self.merged_map)
workflowpath = "/var/lib/cwl/workflow.json#main"
container_req["mounts"]["/var/lib/cwl/workflow.json"] = {
"kind": "json",
if self.tool.tool.get("id", "").startswith("arvwf:"):
container_req["properties"]["template_uuid"] = self.tool.tool["id"][6:33]
- command = ["arvados-cwl-runner", "--local", "--api=containers", "--no-log-timestamps"]
+
+ # --local means execute the workflow instead of submitting a container request
+ # --api=containers means use the containers API
+ # --no-log-timestamps means don't add timestamps (the logging infrastructure does this)
+ # --disable-validate because we already validated so don't need to do it again
+ # --eval-timeout is the timeout for javascript invocation
+        # --thread-count is the number of threads to use for job submission
+ # --enable/disable-reuse sets desired job reuse
+ command = ["arvados-cwl-runner",
+ "--local",
+ "--api=containers",
+ "--no-log-timestamps",
+ "--disable-validate",
+ "--eval-timeout=%s" % self.arvrunner.eval_timeout,
+ "--thread-count=%s" % self.arvrunner.thread_count,
+ "--enable-reuse" if self.enable_reuse else "--disable-reuse"]
+
if self.output_name:
command.append("--output-name=" + self.output_name)
container_req["output_name"] = self.output_name
if kwargs.get("debug"):
command.append("--debug")
- if self.enable_reuse:
- command.append("--enable-reuse")
- else:
- command.append("--disable-reuse")
-
if self.on_error:
command.append("--on-error=" + self.on_error)
return container_req
- def run(self, *args, **kwargs):
+ def run(self, **kwargs):
kwargs["keepprefix"] = "keep:"
- job_spec = self.arvados_job_spec(*args, **kwargs)
- job_spec.setdefault("owner_uuid", self.arvrunner.project_uuid)
+ job_spec = self.arvados_job_spec(**kwargs)
+ if self.arvrunner.project_uuid:
+ job_spec["owner_uuid"] = self.arvrunner.project_uuid
- response = self.arvrunner.api.container_requests().create(
- body=job_spec
- ).execute(num_retries=self.arvrunner.num_retries)
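+        # As in ArvadosContainer.run(): update an existing container request
+        # if a UUID was supplied, otherwise create a new one.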
+ if kwargs.get("submit_request_uuid"):
+ response = self.arvrunner.api.container_requests().update(
+ uuid=kwargs["submit_request_uuid"],
+ body=job_spec
+ ).execute(num_retries=self.arvrunner.num_retries)
+ else:
+ response = self.arvrunner.api.container_requests().create(
+ body=job_spec
+ ).execute(num_retries=self.arvrunner.num_retries)
self.uuid = response["uuid"]
- self.arvrunner.processes[self.uuid] = self
+ self.arvrunner.process_submitted(self)
logger.info("%s submitted container %s", self.arvrunner.label(self), response["uuid"])
- if response["state"] == "Final":
- self.done(response)
-
def done(self, record):
try:
container = self.arvrunner.api.containers().get(
self.arvrunner.output_callback({}, "permanentFail")
else:
super(RunnerContainer, self).done(container)
- finally:
- if record["uuid"] in self.arvrunner.processes:
- del self.arvrunner.processes[record["uuid"]]