import re
from functools import partial
import time
+import urllib.parse
from cwltool.errors import WorkflowException
import cwltool.workflow
from arvados.errors import ApiError
import arvados_cwl.util
-from .arvcontainer import RunnerContainer
-from .arvjob import RunnerJob, RunnerTemplate
-from .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps
+from .arvcontainer import RunnerContainer, cleanup_name_for_collection
+from .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps, make_builder
from .arvtool import ArvadosCommandTool, validate_cluster_target, ArvadosExpressionTool
from .arvworkflow import ArvadosWorkflow, upload_workflow
from .fsaccess import CollectionFsAccess, CollectionFetcher, collectionResolver, CollectionCache, pdh_size
from .perf import Perf
from .pathmapper import NoFollowPathMapper
-from .task_queue import TaskQueue
+from cwltool.task_queue import TaskQueue
from .context import ArvLoadingContext, ArvRuntimeContext
from ._version import __version__
from cwltool.process import shortname, UnsupportedRequirement, use_custom_schema
-from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, get_listing, visit_class
+from cwltool.utils import adjustFileObjs, adjustDirObjs, get_listing, visit_class, aslist
from cwltool.command_line_tool import compute_checksums
from cwltool.load_tool import load_tool
class ArvCwlExecutor(object):
- """Execute a CWL tool or workflow, submit work (using either jobs or
- containers API), wait for them to complete, and report output.
+ """Execute a CWL tool or workflow, submit work (using containers API),
+ wait for them to complete, and report output.
"""
arvargs=None,
keep_client=None,
num_retries=4,
- thread_count=4):
+ thread_count=4,
+ stdout=sys.stdout):
if arvargs is None:
arvargs = argparse.Namespace()
self.poll_interval = 12
self.loadingContext = None
self.should_estimate_cache_size = True
+ self.fs_access = None
+ self.secret_store = None
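+        # Output stream used to report the UUIDs of submitted workflows and runner jobs.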
+ self.stdout = stdout
if keep_client is not None:
self.keep_client = keep_client
num_retries=self.num_retries)
self.work_api = None
- expected_api = ["jobs", "containers"]
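+        # Only the containers API is supported; the legacy jobs API has been removed.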
+ expected_api = ["containers"]
for api in expected_api:
try:
methods = self.api._rootDesc.get('resources')[api]['methods']
raise Exception("Unsupported API '%s', expected one of %s" % (arvargs.work_api, expected_api))
if self.work_api == "jobs":
- logger.warning("""
+ logger.error("""
*******************************
-Using the deprecated 'jobs' API.
-
-To get rid of this warning:
-
-Users: read about migrating at
-http://doc.arvados.org/user/cwl/cwl-style.html#migrate
-and use the option --api=containers
-
-Admins: configure the cluster to disable the 'jobs' API as described at:
-http://doc.arvados.org/install/install-api-server.html#disable_api_methods
+The 'jobs' API is no longer supported.
*******************************""")
+            sys.exit(1)
self.loadingContext = ArvLoadingContext(vars(arvargs))
self.loadingContext.fetcher_constructor = self.fetcher_constructor
handler = RuntimeStatusLoggingHandler(self.runtime_status_update)
root_logger.addHandler(handler)
- self.runtimeContext = ArvRuntimeContext(vars(arvargs))
- self.runtimeContext.make_fs_access = partial(CollectionFsAccess,
+ self.toplevel_runtimeContext = ArvRuntimeContext(vars(arvargs))
+ self.toplevel_runtimeContext.make_fs_access = partial(CollectionFsAccess,
collection_cache=self.collection_cache)
- validate_cluster_target(self, self.runtimeContext)
+ validate_cluster_target(self, self.toplevel_runtimeContext)
def arv_make_tool(self, toolpath_object, loadingContext):
activity statuses, for example in the RuntimeStatusLoggingHandler.
"""
with self.workflow_eval_lock:
- current = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
+ current = None
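+            # Fetching the current container may fail (e.g. when not running inside one); skip the update in that case.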
+ try:
+ current = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
+ except Exception as e:
+ logger.info("Couldn't get current container: %s", e)
if current is None:
return
runtime_status = current.get('runtime_status', {})
- # In case of status being an error, only report the first one.
- if kind == 'error':
- if not runtime_status.get('error'):
- runtime_status.update({
- 'error': message
- })
- if detail is not None:
- runtime_status.update({
- 'errorDetail': detail
- })
- # Further errors are only mentioned as a count.
- else:
- # Get anything before an optional 'and N more' string.
- try:
- error_msg = re.match(
- r'^(.*?)(?=\s*\(and \d+ more\)|$)', runtime_status.get('error')).groups()[0]
- more_failures = re.match(
- r'.*\(and (\d+) more\)', runtime_status.get('error'))
- except TypeError:
- # Ignore tests stubbing errors
- return
- if more_failures:
- failure_qty = int(more_failures.groups()[0])
- runtime_status.update({
- 'error': "%s (and %d more)" % (error_msg, failure_qty+1)
- })
- else:
- runtime_status.update({
- 'error': "%s (and 1 more)" % error_msg
- })
- elif kind in ['warning', 'activity']:
- # Record the last warning/activity status without regard of
- # previous occurences.
+ if kind in ('error', 'warning'):
+ updatemessage = runtime_status.get(kind, "")
+ if not updatemessage:
+ updatemessage = message
+
+ # Subsequent messages tacked on in detail
+ updatedetail = runtime_status.get(kind+'Detail', "")
+ maxlines = 40
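+                # Cap the accumulated detail at roughly 40 lines, then note that further messages were dropped.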
+ if updatedetail.count("\n") < maxlines:
+ if updatedetail:
+ updatedetail += "\n"
+ updatedetail += message + "\n"
+
+ if detail:
+ updatedetail += detail + "\n"
+
+ if updatedetail.count("\n") >= maxlines:
+ updatedetail += "\nSome messages may have been omitted. Check the full log."
+
runtime_status.update({
- kind: message
+ kind: updatemessage,
+ kind+'Detail': updatedetail,
})
- if detail is not None:
- runtime_status.update({
- kind+"Detail": detail
- })
else:
# Ignore any other status kind
return
return "[%s %s]" % (self.work_api[0:-1], obj.name)
def poll_states(self):
- """Poll status of jobs or containers listed in the processes dict.
+ """Poll status of containers listed in the processes dict.
Runs in a separate thread.
"""
begin_poll = time.time()
if self.work_api == "containers":
table = self.poll_api.container_requests()
- elif self.work_api == "jobs":
- table = self.poll_api.jobs()
pageSize = self.poll_api._rootDesc.get('maxItemsPerResponse', 1000)
def check_features(self, obj, parentfield=""):
if isinstance(obj, dict):
- if obj.get("writable") and self.work_api != "containers":
- raise SourceLine(obj, "writable", UnsupportedRequirement).makeError("InitialWorkDir feature 'writable: true' not supported with --api=jobs")
if obj.get("class") == "DockerRequirement":
if obj.get("dockerOutputDirectory"):
- if self.work_api != "containers":
- raise SourceLine(obj, "dockerOutputDirectory", UnsupportedRequirement).makeError(
- "Option 'dockerOutputDirectory' of DockerRequirement not supported with --api=jobs.")
if not obj.get("dockerOutputDirectory").startswith('/'):
raise SourceLine(obj, "dockerOutputDirectory", validate.ValidationException).makeError(
"Option 'dockerOutputDirectory' must be an absolute path.")
- if obj.get("class") == "http://commonwl.org/cwltool#Secrets" and self.work_api != "containers":
- raise SourceLine(obj, "class", UnsupportedRequirement).makeError("Secrets not supported with --api=jobs")
if obj.get("class") == "InplaceUpdateRequirement":
if obj["inplaceUpdate"] and parentfield == "requirements":
raise SourceLine(obj, "class", UnsupportedRequirement).makeError("InplaceUpdateRequirement not supported for keep collections.")
with SourceLine(obj, i, UnsupportedRequirement, logger.isEnabledFor(logging.DEBUG)):
self.check_features(v, parentfield=parentfield)
- def make_output_collection(self, name, storage_classes, tagsString, outputObj):
+ def make_output_collection(self, name, storage_classes, tagsString, output_properties, outputObj):
outputObj = copy.deepcopy(outputObj)
files = []
srccollection = sp[0][5:]
try:
reader = self.collection_cache.get(srccollection)
- srcpath = "/".join(sp[1:]) if len(sp) > 1 else "."
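+                # File locations are URIs, so subpaths may be percent-encoded; decode them before copying.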
+ srcpath = urllib.parse.unquote("/".join(sp[1:]) if len(sp) > 1 else ".")
final.copy(srcpath, v.target, source_collection=reader, overwrite=False)
except arvados.errors.ArgumentError as e:
logger.error("Creating CollectionReader for '%s' '%s': %s", k, v, e)
res = str(json.dumps(outputObj, sort_keys=True, indent=4, separators=(',',': '), ensure_ascii=False))
f.write(res)
- final.save_new(name=name, owner_uuid=self.project_uuid, storage_classes=storage_classes, ensure_unique_name=True)
+
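+        # Attach any computed output properties when saving the final output collection.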
+ final.save_new(name=name, owner_uuid=self.project_uuid, storage_classes=storage_classes,
+ ensure_unique_name=True, properties=output_properties)
logger.info("Final output collection %s \"%s\" (%s)", final.portable_data_hash(),
final.api_response()["name"],
self.api.containers().update(uuid=current['uuid'],
body={
'output': self.final_output_collection.portable_data_hash(),
+ 'output_properties': self.final_output_collection.get_properties(),
}).execute(num_retries=self.num_retries)
self.api.collections().update(uuid=self.final_output_collection.manifest_locator(),
body={
}).execute(num_retries=self.num_retries)
except Exception:
logger.exception("Setting container output")
- return
- elif self.work_api == "jobs" and "TASK_UUID" in os.environ:
- self.api.job_tasks().update(uuid=os.environ["TASK_UUID"],
- body={
- 'output': self.final_output_collection.portable_data_hash(),
- 'success': self.final_status == "success",
- 'progress':1.0
- }).execute(num_retries=self.num_retries)
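+            # Re-raise so a failure to record the container output isn't silently swallowed.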
+ raise
def apply_reqs(self, job_order_object, tool):
if "https://w3id.org/cwl/cwl#requirements" in job_order_object:
raise WorkflowException(
"`cwl:requirements` in the input object is not part of CWL "
"v1.0. You can adjust to use `cwltool:overrides` instead; or you "
- "can set the cwlVersion to v1.1.0-dev1 or greater and re-run with "
+ "can set the cwlVersion to v1.1 or greater and re-run with "
"--enable-dev.")
job_reqs = job_order_object["https://w3id.org/cwl/cwl#requirements"]
for req in job_reqs:
tool.requirements.append(req)
- def arv_executor(self, tool, job_order, runtimeContext, logger=None):
+ def arv_executor(self, updated_tool, job_order, runtimeContext, logger=None):
self.debug = runtimeContext.debug
- tool.visit(self.check_features)
+ workbench1 = self.api.config()["Services"]["Workbench1"]["ExternalURL"]
+ workbench2 = self.api.config()["Services"]["Workbench2"]["ExternalURL"]
+ controller = self.api.config()["Services"]["Controller"]["ExternalURL"]
+ logger.info("Using cluster %s (%s)", self.api.config()["ClusterID"], workbench2 or workbench1 or controller)
+
+ updated_tool.visit(self.check_features)
- self.project_uuid = runtimeContext.project_uuid
self.pipeline = None
self.fs_access = runtimeContext.make_fs_access(runtimeContext.basedir)
self.secret_store = runtimeContext.secret_store
if runtimeContext.submit_request_uuid and self.work_api != "containers":
raise Exception("--submit-request-uuid requires containers API, but using '{}' api".format(self.work_api))
+ runtimeContext = runtimeContext.copy()
+
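+        # "default" expands to every storage class marked Default: true in the cluster config.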
+ default_storage_classes = ",".join([k for k,v in self.api.config().get("StorageClasses", {"default": {"Default": True}}).items() if v.get("Default") is True])
+ if runtimeContext.storage_classes == "default":
+ runtimeContext.storage_classes = default_storage_classes
+ if runtimeContext.intermediate_storage_classes == "default":
+ runtimeContext.intermediate_storage_classes = default_storage_classes
+
if not runtimeContext.name:
- runtimeContext.name = self.name = tool.tool.get("label") or tool.metadata.get("label") or os.path.basename(tool.tool["id"])
+ runtimeContext.name = self.name = updated_tool.tool.get("label") or updated_tool.metadata.get("label") or os.path.basename(updated_tool.tool["id"])
+
+ if runtimeContext.copy_deps is None and (runtimeContext.create_workflow or runtimeContext.update_workflow):
+ # When creating or updating workflow record, by default
+ # always copy dependencies and ensure Docker images are up
+ # to date.
+ runtimeContext.copy_deps = True
+ runtimeContext.match_local_docker = True
+
+ if runtimeContext.update_workflow and self.project_uuid is None:
+ # If we are updating a workflow, make sure anything that
+ # gets uploaded goes into the same parent project, unless
+ # an alternate --project-uuid was provided.
+ existing_wf = self.api.workflows().get(uuid=runtimeContext.update_workflow).execute()
+ runtimeContext.project_uuid = existing_wf["owner_uuid"]
+
+ self.project_uuid = runtimeContext.project_uuid
# Upload local file references in the job order.
- job_order = upload_job_order(self, "%s input" % runtimeContext.name,
- tool, job_order)
+ with Perf(metrics, "upload_job_order"):
+ job_order = upload_job_order(self, "%s input" % runtimeContext.name,
+ updated_tool, job_order, runtimeContext)
+
+        # The last clause means: if this is a CommandLineTool, we are
+        # going to wait for the result, and always_submit_runner is
+        # false, then we don't submit a runner process.
submitting = (runtimeContext.update_workflow or
runtimeContext.create_workflow or
(runtimeContext.submit and not
- (tool.tool["class"] == "CommandLineTool" and
+ (updated_tool.tool["class"] == "CommandLineTool" and
runtimeContext.wait and
not runtimeContext.always_submit_runner)))
loadingContext = self.loadingContext.copy()
loadingContext.do_validate = False
- loadingContext.do_update = False
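+        # The tool was already validated when first loaded; skip JS validation on any reload below.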
+ loadingContext.disable_js_validation = True
if submitting:
+ loadingContext.do_update = False
# Document may have been auto-updated. Reload the original
# document with updating disabled because we want to
- # submit the original document, not the auto-updated one.
- tool = load_tool(tool.tool["id"], loadingContext)
+ # submit the document with its original CWL version, not
+ # the auto-updated one.
+ with Perf(metrics, "load_tool original"):
+ tool = load_tool(updated_tool.tool["id"], loadingContext)
+ else:
+ tool = updated_tool
# Upload direct dependencies of workflow steps, get back mapping of files to keep references.
# Also uploads docker images.
- merged_map = upload_workflow_deps(self, tool)
+ logger.info("Uploading workflow dependencies")
+ with Perf(metrics, "upload_workflow_deps"):
+ merged_map = upload_workflow_deps(self, tool, runtimeContext)
# Recreate process object (ArvadosWorkflow or
# ArvadosCommandTool) because tool document may have been
loadingContext.loader = tool.doc_loader
loadingContext.avsc_names = tool.doc_schema
loadingContext.metadata = tool.metadata
- tool = load_tool(tool.tool, loadingContext)
+ with Perf(metrics, "load_tool"):
+ tool = load_tool(tool.tool, loadingContext)
- existing_uuid = runtimeContext.update_workflow
- if existing_uuid or runtimeContext.create_workflow:
+ if runtimeContext.update_workflow or runtimeContext.create_workflow:
# Create a pipeline template or workflow record and exit.
- if self.work_api == "jobs":
- tmpl = RunnerTemplate(self, tool, job_order,
- runtimeContext.enable_reuse,
- uuid=existing_uuid,
- submit_runner_ram=runtimeContext.submit_runner_ram,
- name=runtimeContext.name,
- merged_map=merged_map,
- loadingContext=loadingContext)
- tmpl.save()
- # cwltool.main will write our return value to stdout.
- return (tmpl.uuid, "success")
- elif self.work_api == "containers":
- return (upload_workflow(self, tool, job_order,
- self.project_uuid,
- uuid=existing_uuid,
- submit_runner_ram=runtimeContext.submit_runner_ram,
- name=runtimeContext.name,
- merged_map=merged_map),
- "success")
+ if self.work_api == "containers":
+ uuid = upload_workflow(self, tool, job_order,
+ runtimeContext.project_uuid,
+ runtimeContext,
+ uuid=runtimeContext.update_workflow,
+ submit_runner_ram=runtimeContext.submit_runner_ram,
+ name=runtimeContext.name,
+ merged_map=merged_map,
+ submit_runner_image=runtimeContext.submit_runner_image)
+ self.stdout.write(uuid + "\n")
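+            # Return None so cwltool.main doesn't print the UUID a second time.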
+ return (None, "success")
self.apply_reqs(job_order, tool)
self.ignore_docker_for_reuse = runtimeContext.ignore_docker_for_reuse
self.eval_timeout = runtimeContext.eval_timeout
- runtimeContext = runtimeContext.copy()
runtimeContext.use_container = True
runtimeContext.tmpdir_prefix = "tmp"
runtimeContext.work_api = self.work_api
+ if not self.output_name:
+ self.output_name = "Output from workflow %s" % runtimeContext.name
+
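+        # Sanitize the chosen name so it is valid as an Arvados collection name.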
+ self.output_name = cleanup_name_for_collection(self.output_name)
+
if self.work_api == "containers":
if self.ignore_docker_for_reuse:
raise Exception("--ignore-docker-for-reuse not supported with containers API.")
runtimeContext.docker_outdir = "/var/spool/cwl"
runtimeContext.tmpdir = "/tmp"
runtimeContext.docker_tmpdir = "/tmp"
- elif self.work_api == "jobs":
- if runtimeContext.priority != DEFAULT_PRIORITY:
- raise Exception("--priority not implemented for jobs API.")
- runtimeContext.outdir = "$(task.outdir)"
- runtimeContext.docker_outdir = "$(task.outdir)"
- runtimeContext.tmpdir = "$(task.tmpdir)"
if runtimeContext.priority < 1 or runtimeContext.priority > 1000:
raise Exception("--priority must be in the range 1..1000.")
if runtimeContext.submit:
# Submit a runner job to run the workflow for us.
if self.work_api == "containers":
- if tool.tool["class"] == "CommandLineTool" and runtimeContext.wait and (not runtimeContext.always_submit_runner):
- runtimeContext.runnerjob = tool.tool["id"]
+ if submitting:
+ tool = RunnerContainer(self, updated_tool,
+ tool, loadingContext, runtimeContext.enable_reuse,
+ self.output_name,
+ self.output_tags,
+ submit_runner_ram=runtimeContext.submit_runner_ram,
+ name=runtimeContext.name,
+ on_error=runtimeContext.on_error,
+ submit_runner_image=runtimeContext.submit_runner_image,
+ intermediate_output_ttl=runtimeContext.intermediate_output_ttl,
+ merged_map=merged_map,
+ priority=runtimeContext.priority,
+ secret_store=self.secret_store,
+ collection_cache_size=runtimeContext.collection_cache_size,
+ collection_cache_is_default=self.should_estimate_cache_size)
else:
- tool = RunnerContainer(self, tool, loadingContext, runtimeContext.enable_reuse,
- self.output_name,
- self.output_tags,
- submit_runner_ram=runtimeContext.submit_runner_ram,
- name=runtimeContext.name,
- on_error=runtimeContext.on_error,
- submit_runner_image=runtimeContext.submit_runner_image,
- intermediate_output_ttl=runtimeContext.intermediate_output_ttl,
- merged_map=merged_map,
- priority=runtimeContext.priority,
- secret_store=self.secret_store,
- collection_cache_size=runtimeContext.collection_cache_size,
- collection_cache_is_default=self.should_estimate_cache_size)
- elif self.work_api == "jobs":
- tool = RunnerJob(self, tool, loadingContext, runtimeContext.enable_reuse,
- self.output_name,
- self.output_tags,
- submit_runner_ram=runtimeContext.submit_runner_ram,
- name=runtimeContext.name,
- on_error=runtimeContext.on_error,
- submit_runner_image=runtimeContext.submit_runner_image,
- merged_map=merged_map)
- elif runtimeContext.cwl_runner_job is None and self.work_api == "jobs":
- # Create pipeline for local run
- self.pipeline = self.api.pipeline_instances().create(
- body={
- "owner_uuid": self.project_uuid,
- "name": runtimeContext.name if runtimeContext.name else shortname(tool.tool["id"]),
- "components": {},
- "state": "RunningOnClient"}).execute(num_retries=self.num_retries)
- logger.info("Pipeline instance %s", self.pipeline["uuid"])
+ runtimeContext.runnerjob = tool.tool["id"]
if runtimeContext.cwl_runner_job is not None:
self.uuid = runtimeContext.cwl_runner_job.get('uuid')
if runtimeContext.submit and not runtimeContext.wait:
runnerjob = next(jobiter)
runnerjob.run(runtimeContext)
- return (runnerjob.uuid, "success")
+ self.stdout.write(runnerjob.uuid+"\n")
+ return (None, "success")
current_container = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
if current_container:
if self.pipeline:
self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
body={"state": "Failed"}).execute(num_retries=self.num_retries)
- if runtimeContext.submit and isinstance(tool, Runner):
- runnerjob = tool
- if runnerjob.uuid and self.work_api == "containers":
- self.api.container_requests().update(uuid=runnerjob.uuid,
- body={"priority": "0"}).execute(num_retries=self.num_retries)
+
+ if self.work_api == "containers" and not current_container:
+ # Not running in a crunch container, so cancel any outstanding processes.
+ for p in self.processes:
+ try:
+ self.api.container_requests().update(uuid=p,
+ body={"priority": "0"}
+ ).execute(num_retries=self.num_retries)
+ except Exception:
+ pass
finally:
self.workflow_eval_lock.release()
self.task_queue.drain()
if runtimeContext.submit and isinstance(tool, Runner):
logger.info("Final output collection %s", tool.final_output)
+ if workbench2 or workbench1:
+ logger.info("Output at %scollections/%s", workbench2 or workbench1, tool.final_output)
else:
- if self.output_name is None:
- self.output_name = "Output of %s" % (shortname(tool.tool["id"]))
if self.output_tags is None:
self.output_tags = ""
- storage_classes = runtimeContext.storage_classes.strip().split(",")
- self.final_output, self.final_output_collection = self.make_output_collection(self.output_name, storage_classes, self.output_tags, self.final_output)
+ storage_classes = ""
+ storage_class_req, _ = tool.get_requirement("http://arvados.org/cwl#OutputStorageClass")
+ if storage_class_req and storage_class_req.get("finalStorageClass"):
+ storage_classes = aslist(storage_class_req["finalStorageClass"])
+ else:
+ storage_classes = runtimeContext.storage_classes.strip().split(",")
+
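+            # Evaluate arv:OutputCollectionProperties expressions with a builder so they can reference the inputs.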
+ output_properties = {}
+ output_properties_req, _ = tool.get_requirement("http://arvados.org/cwl#OutputCollectionProperties")
+ if output_properties_req:
+ builder = make_builder(job_order, tool.hints, tool.requirements, runtimeContext, tool.metadata)
+ for pr in output_properties_req["outputProperties"]:
+ output_properties[pr["propertyName"]] = builder.do_eval(pr["propertyValue"])
+
+ self.final_output, self.final_output_collection = self.make_output_collection(self.output_name, storage_classes,
+ self.output_tags, output_properties,
+ self.final_output)
self.set_crunch_output()
if runtimeContext.compute_checksum: