import logging
import re
import copy
+import json
from cwltool.process import get_feature, shortname
from cwltool.errors import WorkflowException
-from cwltool.draft2tool import revmap_file, remove_hostfs, CommandLineTool
+from cwltool.draft2tool import revmap_file, CommandLineTool
from cwltool.load_tool import fetch_document
from cwltool.builder import Builder
import arvados.collection
from .arvdocker import arv_docker_get_image
from .runner import Runner
+from .pathmapper import InitialWorkDirPathMapper
+from .perf import Perf
from . import done
logger = logging.getLogger('arvados.cwl-runner')
def __init__(self, runner):
self.arvrunner = runner
self.running = False
+ self.uuid = None
def run(self, dry_run=False, pull_image=True, **kwargs):
script_parameters = {
}
runtime_constraints = {}
- if self.generatefiles:
+ if self.generatefiles["listing"]:
vwd = arvados.collection.Collection()
script_parameters["task.vwd"] = {}
- for t in self.generatefiles:
- if isinstance(self.generatefiles[t], dict):
- src, rest = self.arvrunner.fs_access.get_collection(self.generatefiles[t]["path"].replace("$(task.keep)/", "keep:"))
- vwd.copy(rest, t, source_collection=src)
- else:
- with vwd.open(t, "w") as f:
- f.write(self.generatefiles[t].encode('utf-8'))
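+ # Stage the InitialWorkDirRequirement listing with a path mapper:
+ # "CreateFile" entries are literal file contents that get written
+ # into a fresh collection in Keep.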
+ generatemapper = InitialWorkDirPathMapper([self.generatefiles], "", "",
+ separateDirs=False)
+ for f, p in generatemapper.items():
+ if p.type == "CreateFile":
+ with vwd.open(p.target, "w") as n:
+ n.write(p.resolved.encode("utf-8"))
vwd.save_new()
- for t in self.generatefiles:
- script_parameters["task.vwd"][t] = "$(task.keep)/%s/%s" % (vwd.portable_data_hash(), t)
+ for f, p in generatemapper.items():
+ if p.type == "File":
+ script_parameters["task.vwd"][p.target] = p.resolved
+ if p.type == "CreateFile":
+ script_parameters["task.vwd"][p.target] = "$(task.keep)/%s/%s" % (vwd.portable_data_hash(), p.target)
- script_parameters["task.env"] = {"TMPDIR": "$(task.tmpdir)"}
+ script_parameters["task.env"] = {"TMPDIR": self.tmpdir, "HOME": self.outdir}
if self.environment:
script_parameters["task.env"].update(self.environment)
if self.stdin:
- script_parameters["task.stdin"] = self.pathmapper.mapper(self.stdin)[1]
+ script_parameters["task.stdin"] = self.stdin
if self.stdout:
script_parameters["task.stdout"] = self.stdout
+ if self.stderr:
+ script_parameters["task.stderr"] = self.stderr
+
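+ # Forward the tool's exit code lists so the task runner can classify
+ # an exit as success, temporary failure, or permanent failure.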
+ if self.successCodes:
+ script_parameters["task.successCodes"] = self.successCodes
+ if self.temporaryFailCodes:
+ script_parameters["task.temporaryFailCodes"] = self.temporaryFailCodes
+ if self.permanentFailCodes:
+ script_parameters["task.permanentFailCodes"] = self.permanentFailCodes
+
(docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
if docker_req and kwargs.get("use_container") is not False:
runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api, docker_req, pull_image, self.arvrunner.project_uuid)
filters.append(["docker_image_locator", "in docker", runtime_constraints["docker_image"]])
try:
- response = self.arvrunner.api.jobs().create(
- body={
- "owner_uuid": self.arvrunner.project_uuid,
- "script": "crunchrunner",
- "repository": "arvados",
- "script_version": "master",
- "minimum_script_version": "9e5b98e8f5f4727856b53447191f9c06e3da2ba6",
- "script_parameters": {"tasks": [script_parameters]},
- "runtime_constraints": runtime_constraints
- },
- filters=filters,
- find_or_create=kwargs.get("enable_reuse", True)
- ).execute(num_retries=self.arvrunner.num_retries)
-
- self.arvrunner.jobs[response["uuid"]] = self
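+ # Perf is a timing context manager; it reports the elapsed time of
+ # the wrapped block through the given logger.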
+ with Perf(logger, "create %s" % self.name):
+ response = self.arvrunner.api.jobs().create(
+ body={
+ "owner_uuid": self.arvrunner.project_uuid,
+ "script": "crunchrunner",
+ "repository": "arvados",
+ "script_version": "master",
+ "minimum_script_version": "9e5b98e8f5f4727856b53447191f9c06e3da2ba6",
+ "script_parameters": {"tasks": [script_parameters]},
+ "runtime_constraints": runtime_constraints
+ },
+ filters=filters,
+ find_or_create=kwargs.get("enable_reuse", True)
+ ).execute(num_retries=self.arvrunner.num_retries)
+
+ self.arvrunner.processes[response["uuid"]] = self
self.update_pipeline_component(response)
logger.info("Job %s (%s) is %s", self.name, response["uuid"], response["state"])
if response["state"] in ("Complete", "Failed", "Cancelled"):
- self.done(response)
+ with Perf(logger, "done %s" % self.name):
+ self.done(response)
except Exception as e:
logger.error("Got error %s" % str(e))
self.output_callback({}, "permanentFail")
def update_pipeline_component(self, record):
if self.arvrunner.pipeline:
self.arvrunner.pipeline["components"][self.name] = {"job": record}
- self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(uuid=self.arvrunner.pipeline["uuid"],
-     body={
-         "components": self.arvrunner.pipeline["components"]
-     }).execute(num_retries=self.arvrunner.num_retries)
+ with Perf(logger, "update_pipeline_component %s" % self.name):
+     self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(uuid=self.arvrunner.pipeline["uuid"],
+         body={
+             "components": self.arvrunner.pipeline["components"]
+         }).execute(num_retries=self.arvrunner.num_retries)
outputs = {}
try:
if record["output"]:
- logc = arvados.collection.Collection(record["log"])
- log = logc.open(logc.keys()[0])
- tmpdir = None
- outdir = None
- keepdir = None
- for l in log:
- # Determine the tmpdir, outdir and keepdir paths from
- # the job run. Unfortunately, we can't take the first
- # values we find (which are expected to be near the
- # top) and stop scanning because if the node fails and
- # the job restarts on a different node these values
- # will different runs, and we need to know about the
- # final run that actually produced output.
-
- g = tmpdirre.match(l)
- if g:
- tmpdir = g.group(1)
- g = outdirre.match(l)
- if g:
- outdir = g.group(1)
- g = keepre.match(l)
- if g:
- keepdir = g.group(1)
-
- outputs = done.done(self, record, tmpdir, outdir, keepdir)
+ with Perf(logger, "inspect log %s" % self.name):
+ logc = arvados.collection.Collection(record["log"])
+ log = logc.open(logc.keys()[0])
+ tmpdir = None
+ outdir = None
+ keepdir = None
+ for l in log:
+ # Determine the tmpdir, outdir and keepdir paths from
+ # the job run. Unfortunately, we can't take the first
+ # values we find (which are expected to be near the
+ # top) and stop scanning because if the node fails and
+ # the job restarts on a different node these values
+ # will differ between runs, and we need to know about the
+ # final run that actually produced output.
+
+ g = tmpdirre.match(l)
+ if g:
+ tmpdir = g.group(1)
+ g = outdirre.match(l)
+ if g:
+ outdir = g.group(1)
+ g = keepre.match(l)
+ if g:
+ keepdir = g.group(1)
+
+ with Perf(logger, "output collection %s" % self.name):
+ outputs = done.done(self, record, tmpdir, outdir, keepdir)
except WorkflowException as e:
logger.error("Error while collecting job outputs:\n%s", e, exc_info=(e if self.arvrunner.debug else False))
processStatus = "permanentFail"
+ outputs = None
except Exception as e:
logger.exception("Got unknown exception while collecting job outputs:")
processStatus = "permanentFail"
+ outputs = None
self.output_callback(outputs, processStatus)
finally:
- del self.arvrunner.jobs[record["uuid"]]
+ del self.arvrunner.processes[record["uuid"]]
class RunnerJob(Runner):
workflowmapper = super(RunnerJob, self).arvados_job_spec(dry_run=dry_run, pull_image=pull_image, **kwargs)
- self.job_order["cwl:tool"] = workflowmapper.mapper(self.tool.tool["id"])[1]
+ self.job_order["cwl:tool"] = workflowmapper.mapper(self.tool.tool["id"]).target[5:]
return {
"script": "cwl-runner",
"script_version": "master",
).execute(num_retries=self.arvrunner.num_retries)
self.uuid = response["uuid"]
- self.arvrunner.jobs[self.uuid] = self
+ self.arvrunner.processes[self.uuid] = self
logger.info("Submitted job %s", response["uuid"])
if kwargs.get("submit"):
- self.pipeline = self.arvrunner.api.pipeline_instances().create(
+ self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().create(
body={
"owner_uuid": self.arvrunner.project_uuid,
"name": shortname(self.tool.tool["id"]),
type_to_dataclass = {
'boolean': 'boolean',
'File': 'File',
+ 'Directory': 'Collection',
'float': 'number',
'int': 'number',
'string': 'text',
# Title and description...
title = param.pop('label', '')
- descr = param.pop('description', '').rstrip('\n')
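+ # CWL v1.0 renamed the "description" field to "doc".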
+ descr = param.pop('doc', '').rstrip('\n')
if title:
param['title'] = title
if descr:
pass
elif not isinstance(value, dict):
param['value'] = value
- elif param.get('dataclass') == 'File' and value.get('path'):
- param['value'] = value['path']
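+ # CWL v1.0 File objects use "location" (here a "keep:..." URI)
+ # instead of "path"; strip the scheme prefix for the template value.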
+ elif param.get('dataclass') in ('File', 'Collection') and value.get('location'):
+ param['value'] = value['location'][5:]
spec['script_parameters'][param_id] = param
spec['script_parameters']['cwl:tool'] = job_params['cwl:tool']