import json
import time
-from cwltool.process import get_feature, shortname
+from cwltool.process import get_feature, shortname, UnsupportedRequirement
from cwltool.errors import WorkflowException
from cwltool.draft2tool import revmap_file, CommandLineTool
from cwltool.load_tool import fetch_document
from cwltool.builder import Builder
+from cwltool.pathmapper import adjustDirObjs
+
+from schema_salad.sourceline import SourceLine
+
+import ruamel.yaml as yaml
import arvados.collection
from .arvdocker import arv_docker_get_image
-from .runner import Runner, arvados_jobs_image
-from .pathmapper import InitialWorkDirPathMapper
+from .runner import Runner, arvados_jobs_image, packed_workflow, trim_listing, upload_workflow_collection
+from .pathmapper import VwdPathMapper
from .perf import Perf
from . import done
from ._version import __version__
crunchrunner_re = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.(tmpdir|outdir|keep)\)=(.*)")
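+# Pin crunchrunner to a fixed commit so the job-reuse filters below match a known script version.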
+crunchrunner_git_commit = 'a3f2cb186e437bfce0031b024b2157b73ed2717d'
+
class ArvadosJob(object):
"""Submit and manage a Crunch job for executing a CWL CommandLineTool."""
keep_client=self.arvrunner.keep_client,
num_retries=self.arvrunner.num_retries)
script_parameters["task.vwd"] = {}
- generatemapper = InitialWorkDirPathMapper([self.generatefiles], "", "",
- separateDirs=False)
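+ # VwdPathMapper stages InitialWorkDir entries into the job's virtual working directory collection.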
+ generatemapper = VwdPathMapper([self.generatefiles], "", "",
+ separateDirs=False)
with Perf(metrics, "createfiles %s" % self.name):
for f, p in generatemapper.items():
with vwd.open(p.target, "w") as n:
n.write(p.resolved.encode("utf-8"))
- with Perf(metrics, "generatefiles.save_new %s" % self.name):
- vwd.save_new()
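+ # Only save the staging collection when something was actually written into it.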
+ if vwd:
+ with Perf(metrics, "generatefiles.save_new %s" % self.name):
+ vwd.save_new()
for f, p in generatemapper.items():
if p.type == "File":
with Perf(metrics, "arv_docker_get_image %s" % self.name):
(docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
if docker_req and kwargs.get("use_container") is not False:
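+ # The jobs API cannot redirect container output to a custom directory, so fail early with the source position.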
+ if docker_req.get("dockerOutputDirectory"):
+ raise SourceLine(docker_req, "dockerOutputDirectory", UnsupportedRequirement).makeError(
+ "Option 'dockerOutputDirectory' of DockerRequirement not supported.")
runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api, docker_req, pull_image, self.arvrunner.project_uuid)
else:
- runtime_constraints["docker_image"] = arvados_jobs_image(self.arvrunner)
+ runtime_constraints["docker_image"] = "arvados/jobs"
resources = self.builder.resources
if resources is not None:
filters = [["repository", "=", "arvados"],
["script", "=", "crunchrunner"],
- ["script_version", "in git", "9e5b98e8f5f4727856b53447191f9c06e3da2ba6"]]
+ ["script_version", "in git", crunchrunner_git_commit]]
if not self.arvrunner.ignore_docker_for_reuse:
filters.append(["docker_image_locator", "in docker", runtime_constraints["docker_image"]])
"script": "crunchrunner",
"repository": "arvados",
"script_version": "master",
- "minimum_script_version": "9e5b98e8f5f4727856b53447191f9c06e3da2ba6",
+ "minimum_script_version": crunchrunner_git_commit,
"script_parameters": {"tasks": [script_parameters]},
"runtime_constraints": runtime_constraints
},
self.update_pipeline_component(response)
- logger.info("Job %s (%s) is %s", self.name, response["uuid"], response["state"])
-
- if response["state"] in ("Complete", "Failed", "Cancelled"):
+ if response["state"] == "Complete":
+ logger.info("%s reused job %s", self.arvrunner.label(self), response["uuid"])
with Perf(metrics, "done %s" % self.name):
self.done(response)
+ else:
+ logger.info("%s %s is %s", self.arvrunner.label(self), response["uuid"], response["state"])
except Exception as e:
- logger.error("Got error %s" % str(e))
+ logger.exception("%s error" % (self.arvrunner.label(self)))
self.output_callback({}, "permanentFail")
def update_pipeline_component(self, record):
if g:
dirs[g.group(1)] = g.group(2)
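+ # Surface the tail of the job log when the job failed, to aid debugging.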
+ if processStatus == "permanentFail":
+ done.logtail(logc, logger, "%s error log:" % self.arvrunner.label(self))
+
with Perf(metrics, "output collection %s" % self.name):
outputs = done.done(self, record, dirs["tmpdir"],
dirs["outdir"], dirs["keep"])
except WorkflowException as e:
- logger.error("Error while collecting job outputs:\n%s", e, exc_info=(e if self.arvrunner.debug else False))
+ logger.error("%s unable to collect output from %s:\n%s",
+ self.arvrunner.label(self), record["output"], e, exc_info=(e if self.arvrunner.debug else False))
processStatus = "permanentFail"
- outputs = None
except Exception as e:
- logger.exception("Got unknown exception while collecting job outputs:")
+ logger.exception("Got unknown exception while collecting output for job %s:", self.name)
processStatus = "permanentFail"
- outputs = None
- self.output_callback(outputs, processStatus)
+ # Note: on error, output_callback currently expects an empty dict;
+ # anything else will fail.
+ if not isinstance(outputs, dict):
+ logger.error("Unexpected output type %s '%s'", type(outputs), outputs)
+ outputs = {}
+ processStatus = "permanentFail"
finally:
- del self.arvrunner.processes[record["uuid"]]
-
+ self.output_callback(outputs, processStatus)
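+ # The entry may already be gone (e.g. after an earlier error), so guard the delete.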
+ if record["uuid"] in self.arvrunner.processes:
+ del self.arvrunner.processes[record["uuid"]]
class RunnerJob(Runner):
"""Submit and manage a Crunch job that runs crunch_scripts/cwl-runner."""
a pipeline template or pipeline instance.
"""
- workflowmapper = super(RunnerJob, self).arvados_job_spec(dry_run=dry_run, pull_image=pull_image, **kwargs)
-
- # Need to filter this out, gets added by cwltool when providing
- # parameters on the command line, and arv-run-pipeline-instance doesn't
- # like it.
- if "job_order" in self.job_order:
- del self.job_order["job_order"]
+ if self.tool.tool["id"].startswith("keep:"):
+ self.job_order["cwl:tool"] = self.tool.tool["id"][5:]
+ else:
+ packed = packed_workflow(self.arvrunner, self.tool)
+ wf_pdh = upload_workflow_collection(self.arvrunner, self.name, packed)
+ self.job_order["cwl:tool"] = "%s/workflow.cwl#main" % wf_pdh
- self.job_order["cwl:tool"] = workflowmapper.mapper(self.tool.tool["id"]).target[5:]
+ adjustDirObjs(self.job_order, trim_listing)
if self.output_name:
self.job_order["arv:output_name"] = self.output_name
+ if self.output_tags:
+ self.job_order["arv:output_tags"] = self.output_tags
+
self.job_order["arv:enable_reuse"] = self.enable_reuse
+ if self.on_error:
+ self.job_order["arv:on_error"] = self.on_error
+
return {
"script": "cwl-runner",
- "script_version": __version__,
+ "script_version": "master",
+ "minimum_script_version": "570509ab4d2ef93d870fd2b1f2eab178afb1bad9",
"repository": "arvados",
"script_parameters": self.job_order,
"runtime_constraints": {
- "docker_image": arvados_jobs_image(self.arvrunner)
+ "docker_image": arvados_jobs_image(self.arvrunner, self.jobs_image),
+ "min_ram_mb_per_node": self.submit_runner_ram
}
}
self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().create(
body={
"owner_uuid": self.arvrunner.project_uuid,
- "name": shortname(self.tool.tool["id"]),
+ "name": self.name,
"components": {"cwl-runner": job_spec },
"state": "RunningOnServer"}).execute(num_retries=self.arvrunner.num_retries)
logger.info("Created pipeline %s", self.arvrunner.pipeline["uuid"])
'string': 'text',
}
- def __init__(self, runner, tool, job_order, enable_reuse):
+ def __init__(self, runner, tool, job_order, enable_reuse, uuid,
+ submit_runner_ram=0, name=None):
self.runner = runner
self.tool = tool
self.job = RunnerJob(
tool=tool,
job_order=job_order,
enable_reuse=enable_reuse,
- output_name=None)
+ output_name=None,
+ output_tags=None,
+ submit_runner_ram=submit_runner_ram,
+ name=name)
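+ # Remember the existing template uuid (if any) so save() can update it in place.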
+ self.uuid = uuid
def pipeline_component_spec(self):
"""Return a component that Workbench and a-r-p-i will understand.
if not isinstance(types, list):
types = [types]
param['required'] = 'null' not in types
- non_null_types = set(types) - set(['null'])
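+ # Type entries may be dicts (e.g. record or array schemas), which aren't hashable, so avoid set arithmetic.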
+ non_null_types = [t for t in types if t != "null"]
if len(non_null_types) == 1:
the_type = [c for c in non_null_types][0]
- dataclass = self.type_to_dataclass.get(the_type)
+ dataclass = None
+ if isinstance(the_type, basestring):
+ dataclass = self.type_to_dataclass.get(the_type)
if dataclass:
param['dataclass'] = dataclass
# Note: If we didn't figure out a single appropriate
return spec
def save(self):
- job_spec = self.pipeline_component_spec()
- response = self.runner.api.pipeline_templates().create(body={
+ body = {
"components": {
- self.job.name: job_spec,
+ self.job.name: self.pipeline_component_spec(),
},
"name": self.job.name,
- "owner_uuid": self.runner.project_uuid,
- }, ensure_unique_name=True).execute(num_retries=self.runner.num_retries)
- self.uuid = response["uuid"]
- logger.info("Created template %s", self.uuid)
+ }
+ if self.runner.project_uuid:
+ body["owner_uuid"] = self.runner.project_uuid
+ if self.uuid:
+ self.runner.api.pipeline_templates().update(
+ uuid=self.uuid, body=body).execute(
+ num_retries=self.runner.num_retries)
+ logger.info("Updated template %s", self.uuid)
+ else:
+ self.uuid = self.runner.api.pipeline_templates().create(
+ body=body, ensure_unique_name=True).execute(
+ num_retries=self.runner.num_retries)['uuid']
+ logger.info("Created template %s", self.uuid)