# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

from __future__ import division
from builtins import next
from builtins import object
from builtins import str
from future.utils import viewvalues, viewitems

import argparse
import copy
import json
import logging
import os
import subprocess
import sys
import threading
import time
import urllib.parse

from functools import partial

from cwltool.errors import WorkflowException
import cwltool.workflow
from schema_salad.sourceline import SourceLine
import schema_salad.validate as validate
from schema_salad.ref_resolver import file_uri, uri_file_path

import arvados
import arvados.collection
import arvados.keep
from arvados.keep import KeepClient
from arvados.errors import ApiError

import arvados_cwl.util
from .arvcontainer import RunnerContainer, cleanup_name_for_collection
from .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps, make_builder
from .arvtool import ArvadosCommandTool, validate_cluster_target, ArvadosExpressionTool
from .arvworkflow import ArvadosWorkflow, upload_workflow
from .fsaccess import CollectionFsAccess, CollectionFetcher, collectionResolver, CollectionCache, pdh_size
from .perf import Perf
from .pathmapper import NoFollowPathMapper
from cwltool.task_queue import TaskQueue
from .context import ArvLoadingContext, ArvRuntimeContext
from ._version import __version__

from cwltool.process import shortname, UnsupportedRequirement, use_custom_schema
from cwltool.utils import adjustFileObjs, adjustDirObjs, get_listing, visit_class, aslist
from cwltool.command_line_tool import compute_checksums
from cwltool.load_tool import load_tool

logger = logging.getLogger('arvados.cwl-runner')
metrics = logging.getLogger('arvados.cwl-runner.metrics')

DEFAULT_PRIORITY = 500
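# Note: the containers API accepts priorities in the range 1..1000
# (enforced in arv_executor below); 500 is simply the midpoint default.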

class RuntimeStatusLoggingHandler(logging.Handler):
    """
    Intercepts logging calls and reports them as runtime statuses on the
    runner container.
    """
    def __init__(self, runtime_status_update_func):
        super(RuntimeStatusLoggingHandler, self).__init__()
        self.runtime_status_update = runtime_status_update_func
        self.updatingRuntimeStatus = False

    def emit(self, record):
        kind = None
        if record.levelno >= logging.ERROR:
            kind = 'error'
        elif record.levelno >= logging.WARNING:
            kind = 'warning'
        # updatingRuntimeStatus guards against re-entrancy:
        # runtime_status_update may itself log, which would otherwise
        # recurse back into this handler.
        if kind is not None and self.updatingRuntimeStatus is not True:
            self.updatingRuntimeStatus = True
            try:
                log_msg = record.getMessage()
                if '\n' in log_msg:
                    # If the logged message is multi-line, use its first line as status
                    # and the rest as detail.
                    status, detail = log_msg.split('\n', 1)
                    self.runtime_status_update(
                        kind,
                        "%s: %s" % (record.name, status),
                        detail
                    )
                else:
                    self.runtime_status_update(
                        kind,
                        "%s: %s" % (record.name, record.getMessage())
                    )
            finally:
                self.updatingRuntimeStatus = False
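
# Concurrency overview (a summary of the code below, not a spec): the main
# thread runs arv_executor's evaluation loop under workflow_eval_lock, a
# TaskQueue runs container submissions and completions on worker threads,
# and a separate polling thread (poll_states) watches container request
# states, feeding updates back through on_message.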

class ArvCwlExecutor(object):
    """Execute a CWL tool or workflow, submit work (using containers API),
    wait for them to complete, and report output.

    """

    def __init__(self, api_client,
                 arvargs=None,
                 keep_client=None,
                 num_retries=4,
                 thread_count=4,
                 stdout=sys.stdout):

        if arvargs is None:
            arvargs = argparse.Namespace()
            arvargs.work_api = None
            arvargs.output_name = None
            arvargs.output_tags = None
            arvargs.thread_count = 1
            arvargs.collection_cache_size = None

        self.api = api_client
        self.processes = {}
        self.workflow_eval_lock = threading.Condition(threading.RLock())
        self.final_output = None
        self.final_status = None
        self.num_retries = num_retries
        self.uuid = None
        self.stop_polling = threading.Event()
        self.poll_api = None
        self.pipeline = None
        self.final_output_collection = None
        self.output_name = arvargs.output_name
        self.output_tags = arvargs.output_tags
        self.project_uuid = None
        self.intermediate_output_ttl = 0
        self.intermediate_output_collections = []
        self.trash_intermediate = False
        self.thread_count = arvargs.thread_count
        self.poll_interval = 12
        self.loadingContext = None
        self.should_estimate_cache_size = True
        self.fs_access = None
        self.secret_store = None
        self.stdout = stdout

        if keep_client is not None:
            self.keep_client = keep_client
        else:
            self.keep_client = arvados.keep.KeepClient(api_client=self.api, num_retries=self.num_retries)

        if arvargs.collection_cache_size:
            collection_cache_size = arvargs.collection_cache_size*1024*1024
            self.should_estimate_cache_size = False
        else:
            collection_cache_size = 256*1024*1024

        self.collection_cache = CollectionCache(self.api, self.keep_client, self.num_retries,
                                                cap=collection_cache_size)

        self.fetcher_constructor = partial(CollectionFetcher,
                                           api_client=self.api,
                                           fs_access=CollectionFsAccess("", collection_cache=self.collection_cache),
                                           num_retries=self.num_retries)

        self.work_api = None
        expected_api = ["containers"]
        for api in expected_api:
            try:
                methods = self.api._rootDesc.get('resources')[api]['methods']
                if ('httpMethod' in methods['create'] and
                    (arvargs.work_api == api or arvargs.work_api is None)):
                    self.work_api = api
                    break
            except KeyError:
                pass

        if not self.work_api:
            if arvargs.work_api is None:
                raise Exception("No supported APIs")
            else:
                raise Exception("Unsupported API '%s', expected one of %s" % (arvargs.work_api, expected_api))

        if self.work_api == "jobs":
            logger.error("""
*******************************
The 'jobs' API is no longer supported.
*******************************""")
            exit(1)

        self.loadingContext = ArvLoadingContext(vars(arvargs))
        self.loadingContext.fetcher_constructor = self.fetcher_constructor
        self.loadingContext.resolver = partial(collectionResolver, self.api, num_retries=self.num_retries)
        self.loadingContext.construct_tool_object = self.arv_make_tool

        # Add a custom logging handler to the root logger for runtime status reporting
        # if running inside a container
        if arvados_cwl.util.get_current_container(self.api, self.num_retries, logger):
            root_logger = logging.getLogger('')

            # Remove existing RuntimeStatusLoggingHandlers if they exist
            handlers = [h for h in root_logger.handlers if not isinstance(h, RuntimeStatusLoggingHandler)]
            root_logger.handlers = handlers

            handler = RuntimeStatusLoggingHandler(self.runtime_status_update)
            root_logger.addHandler(handler)

        self.toplevel_runtimeContext = ArvRuntimeContext(vars(arvargs))
        self.toplevel_runtimeContext.make_fs_access = partial(CollectionFsAccess,
                                                              collection_cache=self.collection_cache)

        validate_cluster_target(self, self.toplevel_runtimeContext)
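
    # cwltool calls construct_tool_object (wired up in __init__ above) for
    # each parsed process object, so the factory below swaps in the
    # Arvados-aware subclasses of CommandLineTool, Workflow and
    # ExpressionTool.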

    def arv_make_tool(self, toolpath_object, loadingContext):
        if "class" in toolpath_object and toolpath_object["class"] == "CommandLineTool":
            return ArvadosCommandTool(self, toolpath_object, loadingContext)
        elif "class" in toolpath_object and toolpath_object["class"] == "Workflow":
            return ArvadosWorkflow(self, toolpath_object, loadingContext)
        elif "class" in toolpath_object and toolpath_object["class"] == "ExpressionTool":
            return ArvadosExpressionTool(self, toolpath_object, loadingContext)
        else:
            raise Exception("Unknown tool %s" % toolpath_object.get("class"))

    def output_callback(self, out, processStatus):
        with self.workflow_eval_lock:
            if processStatus == "success":
                logger.info("Overall process status is %s", processStatus)
                state = "Complete"
            else:
                logger.error("Overall process status is %s", processStatus)
                state = "Failed"
            if self.pipeline:
                self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
                                                     body={"state": state}).execute(num_retries=self.num_retries)
            self.final_status = processStatus
            self.final_output = out
            self.workflow_eval_lock.notifyAll()

    def start_run(self, runnable, runtimeContext):
        self.task_queue.add(partial(runnable.run, runtimeContext),
                            self.workflow_eval_lock, self.stop_polling)
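
    # The two methods below track in-flight container requests: a container
    # is added to self.processes when submitted and removed (its done()
    # callback queued on the task queue) when a terminal state is observed
    # by poll_states/on_message.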

    def process_submitted(self, container):
        with self.workflow_eval_lock:
            self.processes[container.uuid] = container

    def process_done(self, uuid, record):
        with self.workflow_eval_lock:
            j = self.processes[uuid]
            logger.info("%s %s is %s", self.label(j), uuid, record["state"])
            self.task_queue.add(partial(j.done, record),
                                self.workflow_eval_lock, self.stop_polling)
            del self.processes[uuid]

    def runtime_status_update(self, kind, message, detail=None):
        """
        Updates the runtime_status field on the runner container.
        Called when there's a need to report errors, warnings or just
        activity statuses, for example in the RuntimeStatusLoggingHandler.
        """

        if kind not in ('error', 'warning'):
            # Ignore any other status kind
            return

        with self.workflow_eval_lock:
            current = None
            try:
                current = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
            except Exception as e:
                logger.info("Couldn't get current container: %s", e)
            if current is None:
                return
            runtime_status = current.get('runtime_status', {})

            original_updatemessage = updatemessage = runtime_status.get(kind, "")
            if not updatemessage:
                updatemessage = message

            # Subsequent messages tacked on in detail
            original_updatedetail = updatedetail = runtime_status.get(kind+'Detail', "")
            maxlines = 40
            if updatedetail.count("\n") < maxlines:
                if updatedetail:
                    updatedetail += "\n"
                updatedetail += message + "\n"

                if detail:
                    updatedetail += detail + "\n"

                if updatedetail.count("\n") >= maxlines:
                    updatedetail += "\nSome messages may have been omitted. Check the full log."

            if updatemessage == original_updatemessage and updatedetail == original_updatedetail:
                # don't waste time doing an update if nothing changed
                # (usually because we exceeded the max lines)
                return

            runtime_status.update({
                kind: updatemessage,
                kind+'Detail': updatedetail,
            })

            try:
                self.api.containers().update(uuid=current['uuid'],
                                             body={
                                                 'runtime_status': runtime_status,
                                             }).execute(num_retries=self.num_retries)
            except Exception as e:
                logger.info("Couldn't update runtime_status: %s", e)

    def wrapped_callback(self, cb, obj, st):
        with self.workflow_eval_lock:
            cb(obj, st)
            self.workflow_eval_lock.notifyAll()

    def get_wrapped_callback(self, cb):
        return partial(self.wrapped_callback, cb)
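
    # on_message consumes update events shaped like websocket log events
    # ({"object_uuid": ..., "event_type": "update",
    #   "properties": {"new_attributes": ...}}); poll_states synthesizes
    # events of the same shape from API list results.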

    def on_message(self, event):
        if event.get("object_uuid") in self.processes and event["event_type"] == "update":
            uuid = event["object_uuid"]
            if event["properties"]["new_attributes"]["state"] == "Running":
                with self.workflow_eval_lock:
                    j = self.processes[uuid]
                    if j.running is False:
                        j.running = True
                        j.update_pipeline_component(event["properties"]["new_attributes"])
                        logger.info("%s %s is Running", self.label(j), uuid)
            elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled", "Final"):
                self.process_done(uuid, event["properties"]["new_attributes"])

    def label(self, obj):
        return "[%s %s]" % (self.work_api[0:-1], obj.name)

    def poll_states(self):
        """Poll status of containers listed in the processes dict.

        Runs in a separate thread.
        """

        try:
            remain_wait = self.poll_interval
            while True:
                if remain_wait > 0:
                    self.stop_polling.wait(remain_wait)
                if self.stop_polling.is_set():
                    break
                with self.workflow_eval_lock:
                    keys = list(self.processes)
                if not keys:
                    remain_wait = self.poll_interval
                    continue

                begin_poll = time.time()
                if self.work_api == "containers":
                    table = self.poll_api.container_requests()

                pageSize = self.poll_api._rootDesc.get('maxItemsPerResponse', 1000)

                # Poll in pages so a large number of in-flight processes
                # doesn't exceed the API server's response size limit.
                while keys:
                    page = keys[:pageSize]
                    try:
                        proc_states = table.list(filters=[["uuid", "in", page]]).execute(num_retries=self.num_retries)
                    except Exception:
                        logger.exception("Error checking states on API server")
                        remain_wait = self.poll_interval
                        continue

                    for p in proc_states["items"]:
                        self.on_message({
                            "object_uuid": p["uuid"],
                            "event_type": "update",
                            "properties": {
                                "new_attributes": p
                            }
                        })
                    keys = keys[pageSize:]

                finish_poll = time.time()
                remain_wait = self.poll_interval - (finish_poll - begin_poll)
        except:
            logger.exception("Fatal error in state polling thread.")
            with self.workflow_eval_lock:
                self.processes.clear()
                self.workflow_eval_lock.notifyAll()
        finally:
            self.stop_polling.set()
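
    # Intermediate output collections are registered below as steps finish;
    # when --trash-intermediate is in effect they are deleted after a
    # successful run (see the end of arv_executor).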

    def add_intermediate_output(self, uuid):
        if uuid:
            self.intermediate_output_collections.append(uuid)

    def trash_intermediate_output(self):
        logger.info("Cleaning up intermediate output collections")
        for i in self.intermediate_output_collections:
            try:
                self.api.collections().delete(uuid=i).execute(num_retries=self.num_retries)
            except Exception:
                logger.warning("Failed to delete intermediate output: %s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
            except (KeyboardInterrupt, SystemExit):
                break

    def check_features(self, obj, parentfield=""):
        if isinstance(obj, dict):
            if obj.get("class") == "DockerRequirement":
                if obj.get("dockerOutputDirectory"):
                    if not obj.get("dockerOutputDirectory").startswith('/'):
                        raise SourceLine(obj, "dockerOutputDirectory", validate.ValidationException).makeError(
                            "Option 'dockerOutputDirectory' must be an absolute path.")
            if obj.get("class") == "InplaceUpdateRequirement":
                if obj["inplaceUpdate"] and parentfield == "requirements":
                    raise SourceLine(obj, "class", UnsupportedRequirement).makeError("InplaceUpdateRequirement not supported for keep collections.")
            for k,v in viewitems(obj):
                self.check_features(v, parentfield=k)
        elif isinstance(obj, list):
            for i,v in enumerate(obj):
                with SourceLine(obj, i, UnsupportedRequirement, logger.isEnabledFor(logging.DEBUG)):
                    self.check_features(v, parentfield=parentfield)
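
    # make_output_collection works in three passes over the output object:
    # collect all File/Directory entries, copy what they reference out of
    # Keep (or write literals) into a fresh collection, then rewrite the
    # locations to point at the saved collection.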

    def make_output_collection(self, name, storage_classes, tagsString, output_properties, outputObj):
        outputObj = copy.deepcopy(outputObj)

        files = []
        def capture(fileobj):
            files.append(fileobj)

        adjustDirObjs(outputObj, capture)
        adjustFileObjs(outputObj, capture)

        generatemapper = NoFollowPathMapper(files, "", "", separateDirs=False)

        final = arvados.collection.Collection(api_client=self.api,
                                              keep_client=self.keep_client,
                                              num_retries=self.num_retries)

        for k,v in generatemapper.items():
            if v.type == "Directory" and v.resolved.startswith("_:"):
                continue
            if v.type == "CreateFile" and (k.startswith("_:") or v.resolved.startswith("_:")):
                with final.open(v.target, "wb") as f:
                    f.write(v.resolved.encode("utf-8"))
                continue

            if not v.resolved.startswith("keep:"):
                raise Exception("Output source is not in keep or a literal")
            sp = v.resolved.split("/")
            srccollection = sp[0][5:]
            try:
                reader = self.collection_cache.get(srccollection)
                srcpath = urllib.parse.unquote("/".join(sp[1:]) if len(sp) > 1 else ".")
                final.copy(srcpath, v.target, source_collection=reader, overwrite=False)
            except arvados.errors.ArgumentError as e:
                logger.error("Creating CollectionReader for '%s' '%s': %s", k, v, e)
                raise
            except IOError as e:
                logger.error("While preparing output collection: %s", e)
                raise

        def rewrite(fileobj):
            fileobj["location"] = generatemapper.mapper(fileobj["location"]).target
            for k in ("listing", "contents", "nameext", "nameroot", "dirname"):
                if k in fileobj:
                    del fileobj[k]

        adjustDirObjs(outputObj, rewrite)
        adjustFileObjs(outputObj, rewrite)

        with final.open("cwl.output.json", "w") as f:
            res = str(json.dumps(outputObj, sort_keys=True, indent=4, separators=(',',': '), ensure_ascii=False))
            f.write(res)

        final.save_new(name=name, owner_uuid=self.project_uuid, storage_classes=storage_classes,
                       ensure_unique_name=True, properties=output_properties)

        logger.info("Final output collection %s \"%s\" (%s)", final.portable_data_hash(),
                    final.api_response()["name"],
                    final.manifest_locator())

        final_uuid = final.manifest_locator()
        tags = tagsString.split(',')
        for tag in tags:
            self.api.links().create(body={
                "head_uuid": final_uuid, "link_class": "tag", "name": tag
            }).execute(num_retries=self.num_retries)

        def finalcollection(fileobj):
            fileobj["location"] = "keep:%s/%s" % (final.portable_data_hash(), fileobj["location"])

        adjustDirObjs(outputObj, finalcollection)
        adjustFileObjs(outputObj, finalcollection)

        return (outputObj, final)

    def set_crunch_output(self):
        if self.work_api == "containers":
            current = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
            if current is None:
                return
            try:
                self.api.containers().update(uuid=current['uuid'],
                                             body={
                                                 'output': self.final_output_collection.portable_data_hash(),
                                                 'output_properties': self.final_output_collection.get_properties(),
                                             }).execute(num_retries=self.num_retries)
                # The named output collection is trashed here; its content
                # is preserved through the container's output field set above.
                self.api.collections().update(uuid=self.final_output_collection.manifest_locator(),
                                              body={
                                                  'is_trashed': True
                                              }).execute(num_retries=self.num_retries)
            except Exception:
                logger.exception("Setting container output")
                raise

    def apply_reqs(self, job_order_object, tool):
        if "https://w3id.org/cwl/cwl#requirements" in job_order_object:
            if tool.metadata.get("http://commonwl.org/cwltool#original_cwlVersion") == 'v1.0':
                raise WorkflowException(
                    "`cwl:requirements` in the input object is not part of CWL "
                    "v1.0. You can adjust to use `cwltool:overrides` instead; or you "
                    "can set the cwlVersion to v1.1 or greater and re-run with "
                    "--enable-dev.")
            job_reqs = job_order_object["https://w3id.org/cwl/cwl#requirements"]
            for req in job_reqs:
                tool.requirements.append(req)
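
    # get_git_info collects provenance for the workflow source: if the tool
    # was loaded from a file:// URI inside a git checkout, it shells out to
    # git for commit, date, committer, branch, origin and status; otherwise
    # it falls back to any git properties already recorded in the tool
    # metadata.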

    def get_git_info(self, tool):
        in_a_git_repo = False
        cwd = None

        if tool.tool["id"].startswith("file://"):
            # check if git is installed
            try:
                cwd = os.path.dirname(uri_file_path(tool.tool["id"]))
                subprocess.run(["git", "log", "--format=%H", "-n1", "HEAD"], cwd=cwd, check=True, capture_output=True, text=True)
                in_a_git_repo = True
            except Exception:
                pass

        gitproperties = {}

        if in_a_git_repo:
            git_commit = subprocess.run(["git", "log", "--format=%H", "-n1", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
            git_date = subprocess.run(["git", "log", "--format=%cD", "-n1", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
            git_committer = subprocess.run(["git", "log", "--format=%cn <%ce>", "-n1", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
            git_branch = subprocess.run(["git", "branch", "--show-current"], cwd=cwd, capture_output=True, text=True).stdout
            git_origin = subprocess.run(["git", "remote", "get-url", "origin"], cwd=cwd, capture_output=True, text=True).stdout
            git_status = subprocess.run(["git", "status", "--untracked-files=no", "--porcelain"], cwd=cwd, capture_output=True, text=True).stdout

            gitproperties = {
                "http://arvados.org/cwl#gitCommit": git_commit.strip(),
                "http://arvados.org/cwl#gitDate": git_date.strip(),
                "http://arvados.org/cwl#gitCommitter": git_committer.strip(),
                "http://arvados.org/cwl#gitBranch": git_branch.strip(),
                "http://arvados.org/cwl#gitOrigin": git_origin.strip(),
                "http://arvados.org/cwl#gitStatus": git_status.strip(),
            }
        else:
            for g in ("http://arvados.org/cwl#gitCommit",
                      "http://arvados.org/cwl#gitDate",
                      "http://arvados.org/cwl#gitCommitter",
                      "http://arvados.org/cwl#gitBranch",
                      "http://arvados.org/cwl#gitOrigin",
                      "http://arvados.org/cwl#gitStatus"):
                if g in tool.metadata:
                    gitproperties[g] = tool.metadata[g]

        return gitproperties

    def arv_executor(self, updated_tool, job_order, runtimeContext, logger=None):
        self.debug = runtimeContext.debug

        git_info = self.get_git_info(updated_tool)
        if git_info:
            logger.info("Provenance of %s", updated_tool.tool["id"])
            for g in git_info:
                logger.info("  %s: %s", g.split("#", 1)[1], git_info[g])

        workbench1 = self.api.config()["Services"]["Workbench1"]["ExternalURL"]
        workbench2 = self.api.config()["Services"]["Workbench2"]["ExternalURL"]
        controller = self.api.config()["Services"]["Controller"]["ExternalURL"]
        logger.info("Using cluster %s (%s)", self.api.config()["ClusterID"], workbench2 or workbench1 or controller)

        updated_tool.visit(self.check_features)

        self.pipeline = None
        self.fs_access = runtimeContext.make_fs_access(runtimeContext.basedir)
        self.secret_store = runtimeContext.secret_store

        self.trash_intermediate = runtimeContext.trash_intermediate
        if self.trash_intermediate and self.work_api != "containers":
            raise Exception("--trash-intermediate is only supported with --api=containers.")

        self.intermediate_output_ttl = runtimeContext.intermediate_output_ttl
        if self.intermediate_output_ttl and self.work_api != "containers":
            raise Exception("--intermediate-output-ttl is only supported with --api=containers.")
        if self.intermediate_output_ttl < 0:
            raise Exception("Invalid value %d for --intermediate-output-ttl, cannot be less than zero" % self.intermediate_output_ttl)

        if runtimeContext.submit_request_uuid and self.work_api != "containers":
            raise Exception("--submit-request-uuid requires containers API, but using '{}' api".format(self.work_api))

        runtimeContext = runtimeContext.copy()

        default_storage_classes = ",".join([k for k,v in self.api.config().get("StorageClasses", {"default": {"Default": True}}).items() if v.get("Default") is True])
        if runtimeContext.storage_classes == "default":
            runtimeContext.storage_classes = default_storage_classes
        if runtimeContext.intermediate_storage_classes == "default":
            runtimeContext.intermediate_storage_classes = default_storage_classes

        if not runtimeContext.name:
            runtimeContext.name = self.name = updated_tool.tool.get("label") or updated_tool.metadata.get("label") or os.path.basename(updated_tool.tool["id"])

        if runtimeContext.copy_deps is None and (runtimeContext.create_workflow or runtimeContext.update_workflow):
            # When creating or updating workflow record, by default
            # always copy dependencies and ensure Docker images are up
            # to date.
            runtimeContext.copy_deps = True
            runtimeContext.match_local_docker = True

        if runtimeContext.update_workflow and self.project_uuid is None:
            # If we are updating a workflow, make sure anything that
            # gets uploaded goes into the same parent project, unless
            # an alternate --project-uuid was provided.
            existing_wf = self.api.workflows().get(uuid=runtimeContext.update_workflow).execute()
            runtimeContext.project_uuid = existing_wf["owner_uuid"]

        self.project_uuid = runtimeContext.project_uuid

        # Upload local file references in the job order.
        with Perf(metrics, "upload_job_order"):
            job_order = upload_job_order(self, "%s input" % runtimeContext.name,
                                         updated_tool, job_order, runtimeContext)

        # the last clause means: if it is a command line tool, and we
        # are going to wait for the result, and always_submit_runner
        # is false, then we don't submit a runner process.

        submitting = (runtimeContext.update_workflow or
                      runtimeContext.create_workflow or
                      (runtimeContext.submit and not
                       (updated_tool.tool["class"] == "CommandLineTool" and
                        runtimeContext.wait and
                        not runtimeContext.always_submit_runner)))

        loadingContext = self.loadingContext.copy()
        loadingContext.do_validate = False
        loadingContext.disable_js_validation = True
        if submitting:
            loadingContext.do_update = False
            # Document may have been auto-updated. Reload the original
            # document with updating disabled because we want to
            # submit the document with its original CWL version, not
            # the auto-updated one.
            with Perf(metrics, "load_tool original"):
                tool = load_tool(updated_tool.tool["id"], loadingContext)
        else:
            tool = updated_tool

        # Upload direct dependencies of workflow steps, get back mapping of files to keep references.
        # Also uploads docker images.
        logger.info("Uploading workflow dependencies")
        with Perf(metrics, "upload_workflow_deps"):
            merged_map = upload_workflow_deps(self, tool, runtimeContext)

        # Recreate process object (ArvadosWorkflow or
        # ArvadosCommandTool) because tool document may have been
        # updated by upload_workflow_deps in ways that modify
        # inheritance of hints or requirements.
        loadingContext.loader = tool.doc_loader
        loadingContext.avsc_names = tool.doc_schema
        loadingContext.metadata = tool.metadata
        with Perf(metrics, "load_tool"):
            tool = load_tool(tool.tool, loadingContext)

        if runtimeContext.update_workflow or runtimeContext.create_workflow:
            # Create a pipeline template or workflow record and exit.
            if self.work_api == "containers":
                uuid = upload_workflow(self, tool, job_order,
                                       runtimeContext.project_uuid,
                                       runtimeContext,
                                       uuid=runtimeContext.update_workflow,
                                       submit_runner_ram=runtimeContext.submit_runner_ram,
                                       name=runtimeContext.name,
                                       merged_map=merged_map,
                                       submit_runner_image=runtimeContext.submit_runner_image,
                                       git_info=git_info)
                self.stdout.write(uuid + "\n")
                return (None, "success")

        self.apply_reqs(job_order, tool)

        self.ignore_docker_for_reuse = runtimeContext.ignore_docker_for_reuse
        self.eval_timeout = runtimeContext.eval_timeout

        runtimeContext.use_container = True
        runtimeContext.tmpdir_prefix = "tmp"
        runtimeContext.work_api = self.work_api

        if not self.output_name:
            self.output_name = "Output from workflow %s" % runtimeContext.name

        self.output_name = cleanup_name_for_collection(self.output_name)

        if self.work_api == "containers":
            if self.ignore_docker_for_reuse:
                raise Exception("--ignore-docker-for-reuse not supported with containers API.")
            runtimeContext.outdir = "/var/spool/cwl"
            runtimeContext.docker_outdir = "/var/spool/cwl"
            runtimeContext.tmpdir = "/tmp"
            runtimeContext.docker_tmpdir = "/tmp"

        if runtimeContext.priority < 1 or runtimeContext.priority > 1000:
            raise Exception("--priority must be in the range 1..1000.")

        if self.should_estimate_cache_size:
            visited = set()
            estimated_size = [0]
            def estimate_collection_cache(obj):
                if obj.get("location", "").startswith("keep:"):
                    m = pdh_size.match(obj["location"][5:])
                    if m and m.group(1) not in visited:
                        visited.add(m.group(1))
                        estimated_size[0] += int(m.group(2))
            visit_class(job_order, ("File", "Directory"), estimate_collection_cache)
            # Heuristic: size the cache from the manifest sizes embedded in
            # the referenced portable data hashes (roughly 192 bytes of
            # cache per byte of manifest text), with a 256 MiB floor.
            runtimeContext.collection_cache_size = max(((estimated_size[0]*192) // (1024*1024))+1, 256)
            self.collection_cache.set_cap(runtimeContext.collection_cache_size*1024*1024)

        logger.info("Using collection cache size %s MiB", runtimeContext.collection_cache_size)

        if runtimeContext.submit:
            # Submit a runner job to run the workflow for us.
            if self.work_api == "containers":
                if submitting:
                    tool = RunnerContainer(self, updated_tool,
                                           tool, loadingContext, runtimeContext.enable_reuse,
                                           self.output_name,
                                           self.output_tags,
                                           submit_runner_ram=runtimeContext.submit_runner_ram,
                                           name=runtimeContext.name,
                                           on_error=runtimeContext.on_error,
                                           submit_runner_image=runtimeContext.submit_runner_image,
                                           intermediate_output_ttl=runtimeContext.intermediate_output_ttl,
                                           merged_map=merged_map,
                                           priority=runtimeContext.priority,
                                           secret_store=self.secret_store,
                                           collection_cache_size=runtimeContext.collection_cache_size,
                                           collection_cache_is_default=self.should_estimate_cache_size,
                                           git_info=git_info)
                else:
                    runtimeContext.runnerjob = tool.tool["id"]

        if runtimeContext.cwl_runner_job is not None:
            self.uuid = runtimeContext.cwl_runner_job.get('uuid')

        jobiter = tool.job(job_order,
                           self.output_callback,
                           runtimeContext)

        if runtimeContext.submit and not runtimeContext.wait:
            runnerjob = next(jobiter)
            runnerjob.run(runtimeContext)
            self.stdout.write(runnerjob.uuid+"\n")
            return (None, "success")

        current_container = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
        if current_container:
            logger.info("Running inside container %s", current_container.get("uuid"))

        self.poll_api = arvados.api('v1', timeout=runtimeContext.http_timeout)
        self.polling_thread = threading.Thread(target=self.poll_states)
        self.polling_thread.start()

        self.task_queue = TaskQueue(self.workflow_eval_lock, self.thread_count)

        try:
            self.workflow_eval_lock.acquire()

            # Holds the lock while this code runs and releases it when
            # it is safe to do so in self.workflow_eval_lock.wait(),
            # at which point on_message can update job state and
            # process output callbacks.

            loopperf = Perf(metrics, "jobiter")
            loopperf.__enter__()
            for runnable in jobiter:
                loopperf.__exit__()

                if self.stop_polling.is_set():
                    break

                if self.task_queue.error is not None:
                    raise self.task_queue.error

                if runnable:
                    with Perf(metrics, "run"):
                        self.start_run(runnable, runtimeContext)
                else:
                    if (self.task_queue.in_flight + len(self.processes)) > 0:
                        self.workflow_eval_lock.wait(3)
                    else:
                        logger.error("Workflow is deadlocked, no runnable processes and not waiting on any pending processes.")
                        break

                if self.stop_polling.is_set():
                    break

                loopperf.__enter__()
            loopperf.__exit__()

            while (self.task_queue.in_flight + len(self.processes)) > 0:
                if self.task_queue.error is not None:
                    raise self.task_queue.error
                self.workflow_eval_lock.wait(3)

        except UnsupportedRequirement:
            raise
        except:
            if sys.exc_info()[0] is KeyboardInterrupt or sys.exc_info()[0] is SystemExit:
                logger.error("Interrupted, workflow will be cancelled")
            elif isinstance(sys.exc_info()[1], WorkflowException):
                logger.error("Workflow execution failed:\n%s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
            else:
                logger.exception("Workflow execution failed")

            if self.pipeline:
                self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
                                                     body={"state": "Failed"}).execute(num_retries=self.num_retries)

            if self.work_api == "containers" and not current_container:
                # Not running in a crunch container, so cancel any outstanding processes.
                for p in self.processes:
                    try:
                        self.api.container_requests().update(uuid=p,
                                                             body={"priority": "0"}
                                                             ).execute(num_retries=self.num_retries)
                    except Exception:
                        pass
        finally:
            self.workflow_eval_lock.release()
            self.task_queue.drain()
            self.stop_polling.set()
            self.polling_thread.join()
            self.task_queue.join()
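
        # All worker and polling threads have been joined at this point;
        # final_status and final_output were set by output_callback under
        # workflow_eval_lock.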

        if self.final_status == "UnsupportedRequirement":
            raise UnsupportedRequirement("Check log for details.")

        if self.final_output is None:
            raise WorkflowException("Workflow did not return a result.")

        if runtimeContext.submit and isinstance(tool, Runner):
            logger.info("Final output collection %s", tool.final_output)
            if workbench2 or workbench1:
                logger.info("Output at %scollections/%s", workbench2 or workbench1, tool.final_output)
        else:
            if self.output_tags is None:
                self.output_tags = ""

            storage_classes = ""
            storage_class_req, _ = tool.get_requirement("http://arvados.org/cwl#OutputStorageClass")
            if storage_class_req and storage_class_req.get("finalStorageClass"):
                storage_classes = aslist(storage_class_req["finalStorageClass"])
            else:
                storage_classes = runtimeContext.storage_classes.strip().split(",")

            output_properties = {}
            output_properties_req, _ = tool.get_requirement("http://arvados.org/cwl#OutputCollectionProperties")
            if output_properties_req:
                builder = make_builder(job_order, tool.hints, tool.requirements, runtimeContext, tool.metadata)
                for pr in output_properties_req["outputProperties"]:
                    output_properties[pr["propertyName"]] = builder.do_eval(pr["propertyValue"])

            self.final_output, self.final_output_collection = self.make_output_collection(self.output_name, storage_classes,
                                                                                          self.output_tags, output_properties,
                                                                                          self.final_output)
            self.set_crunch_output()

        if runtimeContext.compute_checksum:
            adjustDirObjs(self.final_output, partial(get_listing, self.fs_access))
            adjustFileObjs(self.final_output, partial(compute_checksums, self.fs_access))

        if self.trash_intermediate and self.final_status == "success":
            self.trash_intermediate_output()

        return (self.final_output, self.final_status)