import arvados
import arvados.config
+from arvados.keep import KeepClient
+from arvados.errors import ApiError
from .arvcontainer import ArvadosContainer, RunnerContainer
from .arvjob import ArvadosJob, RunnerJob, RunnerTemplate
-from .runner import Runner
+from .runner import Runner, upload_instance
from .arvtool import ArvadosCommandTool
from .arvworkflow import ArvadosWorkflow, upload_workflow
-from .fsaccess import CollectionFsAccess
+from .fsaccess import CollectionFsAccess, CollectionFetcher
from .perf import Perf
from .pathmapper import FinalOutputPathMapper
+from ._version import __version__
from cwltool.pack import pack
from cwltool.process import shortname, UnsupportedRequirement, getListing
"""
- def __init__(self, api_client, work_api=None, keep_client=None, output_name=None):
+ def __init__(self, api_client, work_api=None, keep_client=None, output_name=None, output_tags=None, num_retries=4):
self.api = api_client
self.processes = {}
self.lock = threading.Lock()
self.final_output = None
self.final_status = None
self.uploaded = {}
- self.num_retries = 4
+ self.num_retries = num_retries
self.uuid = None
- self.work_api = work_api
self.stop_polling = threading.Event()
self.poll_api = None
self.pipeline = None
self.final_output_collection = None
self.output_name = output_name
+ self.output_tags = output_tags
+ self.project_uuid = None
+
if keep_client is not None:
self.keep_client = keep_client
else:
self.keep_client = arvados.keep.KeepClient(api_client=self.api, num_retries=self.num_retries)
- if self.work_api is None:
- # todo: autodetect API to use.
- self.work_api = "jobs"
-
- if self.work_api not in ("containers", "jobs"):
- raise Exception("Unsupported API '%s'" % self.work_api)
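+ # Autodetect which work API the server offers by checking the discovery
+ # document for a usable "jobs" or "containers" resource.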
+ self.work_api = None
+ expected_api = ["jobs", "containers"]
+ for api in expected_api:
+ try:
+ methods = self.api._rootDesc.get('resources')[api]['methods']
+ if ('httpMethod' in methods['create'] and
+ (work_api == api or work_api is None)):
+ self.work_api = api
+ break
+ except KeyError:
+ pass
+
+ if not self.work_api:
+ if work_api is None:
+ raise Exception("No supported APIs")
+ else:
+ raise Exception("Unsupported API '%s', expected one of %s" % (work_api, expected_api))
def arv_make_tool(self, toolpath_object, **kwargs):
kwargs["work_api"] = self.work_api
logger.info("Job %s (%s) is Running", j.name, uuid)
j.running = True
j.update_pipeline_component(event["properties"]["new_attributes"])
- elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled"):
+ elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled", "Final"):
uuid = event["object_uuid"]
try:
self.cond.acquire()
j = self.processes[uuid]
- logger.info("Job %s (%s) is %s", j.name, uuid, event["properties"]["new_attributes"]["state"])
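+ # Derive a singular, capitalized display name from the API name,
+ # e.g. "containers" -> "Container".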
+ txt = self.work_api[0].upper() + self.work_api[1:-1]
+ logger.info("%s %s (%s) is %s", txt, j.name, uuid, event["properties"]["new_attributes"]["state"])
with Perf(metrics, "done %s" % j.name):
j.done(event["properties"]["new_attributes"])
self.cond.notify()
Runs in a separate thread.
"""
- while True:
- self.stop_polling.wait(15)
- if self.stop_polling.is_set():
- break
- with self.lock:
- keys = self.processes.keys()
- if not keys:
- continue
-
- if self.work_api == "containers":
- table = self.poll_api.containers()
- elif self.work_api == "jobs":
- table = self.poll_api.jobs()
-
- try:
- proc_states = table.list(filters=[["uuid", "in", keys]]).execute(num_retries=self.num_retries)
- except Exception as e:
- logger.warn("Error checking states on API server: %s", e)
- continue
-
- for p in proc_states["items"]:
- self.on_message({
- "object_uuid": p["uuid"],
- "event_type": "update",
- "properties": {
- "new_attributes": p
- }
- })
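+ # Wrap the polling loop so a fatal error clears the process table and
+ # notifies the condition variable, waking the main thread instead of
+ # leaving it blocked forever.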
+ try:
+ while True:
+ self.stop_polling.wait(15)
+ if self.stop_polling.is_set():
+ break
+ with self.lock:
+ keys = self.processes.keys()
+ if not keys:
+ continue
+
+ if self.work_api == "containers":
+ table = self.poll_api.container_requests()
+ elif self.work_api == "jobs":
+ table = self.poll_api.jobs()
+
+ try:
+ proc_states = table.list(filters=[["uuid", "in", keys]]).execute(num_retries=self.num_retries)
+ except Exception as e:
+ logger.warn("Error checking states on API server: %s", e)
+ continue
+
+ for p in proc_states["items"]:
+ self.on_message({
+ "object_uuid": p["uuid"],
+ "event_type": "update",
+ "properties": {
+ "new_attributes": p
+ }
+ })
+ except:
+ logger.error("Fatal error in state polling thread.", exc_info=(sys.exc_info()[1] if self.debug else False))
+ self.cond.acquire()
+ self.processes.clear()
+ self.cond.notify()
+ self.cond.release()
+ finally:
+ self.stop_polling.set()
def get_uploaded(self):
return self.uploaded.copy()
for v in obj:
self.check_writable(v)
- def make_output_collection(self, name, outputObj):
+ def make_output_collection(self, name, tagsString, outputObj):
outputObj = copy.deepcopy(outputObj)
files = []
srccollections = {}
for k,v in generatemapper.items():
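+ # Entries starting with "_:" are literals with no Keep source: skip
+ # directory placeholders and write file literals straight into the
+ # output collection.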
+ if k.startswith("_:"):
+ if v.type == "Directory":
+ continue
+ if v.type == "CreateFile":
+ with final.open(v.target, "wb") as f:
+ f.write(v.resolved.encode("utf-8"))
+ continue
+
+ if not k.startswith("keep:"):
+ raise Exception("Output source is not in keep or a literal")
sp = k.split("/")
srccollection = sp[0][5:]
if srccollection not in srccollections:
- srccollections[srccollection] = arvados.collection.CollectionReader(
- srccollection,
- api_client=self.api,
- keep_client=self.keep_client,
- num_retries=self.num_retries)
+ try:
+ srccollections[srccollection] = arvados.collection.CollectionReader(
+ srccollection,
+ api_client=self.api,
+ keep_client=self.keep_client,
+ num_retries=self.num_retries)
+ except arvados.errors.ArgumentError as e:
+ logger.error("Creating CollectionReader for '%s' '%s': %s", k, v, e)
+ raise
reader = srccollections[srccollection]
try:
srcpath = "/".join(sp[1:]) if len(sp) > 1 else "."
def rewrite(fileobj):
fileobj["location"] = generatemapper.mapper(fileobj["location"]).target
- for k in ("basename", "size", "listing"):
+ for k in ("basename", "listing", "contents"):
if k in fileobj:
del fileobj[k]
final.api_response()["name"],
final.manifest_locator())
- self.final_output_collection = final
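+ # Apply the requested tags by creating one "tag" link per entry
+ # pointing at the output collection.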
+ final_uuid = final.manifest_locator()
+ tags = tagsString.split(',')
+ for tag in tags:
+ self.api.links().create(body={
+ "head_uuid": final_uuid, "link_class": "tag", "name": tag
+ }).execute(num_retries=self.num_retries)
+
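+ # Rewrite output locations to reference the saved output collection
+ # by its portable data hash.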
+ def finalcollection(fileobj):
+ fileobj["location"] = "keep:%s/%s" % (final.portable_data_hash(), fileobj["location"])
+
+ adjustDirObjs(outputObj, finalcollection)
+ adjustFileObjs(outputObj, finalcollection)
+
+ return (outputObj, final)
+
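+ # When running inside a crunch container (or job task), record the
+ # final output collection on that record so it is reported as this
+ # step's output.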
+ def set_crunch_output(self):
+ if self.work_api == "containers":
+ try:
+ current = self.api.containers().current().execute(num_retries=self.num_retries)
+ except ApiError as e:
+ # Status code 404 just means we're not running in a container.
+ if e.resp.status != 404:
+ logger.info("Getting current container: %s", e)
+ return
+ try:
+ self.api.containers().update(uuid=current['uuid'],
+ body={
+ 'output': self.final_output_collection.portable_data_hash(),
+ }).execute(num_retries=self.num_retries)
+ except Exception as e:
+ logger.info("Setting container output: %s", e)
+ elif self.work_api == "jobs" and "TASK_UUID" in os.environ:
+ self.api.job_tasks().update(uuid=os.environ["TASK_UUID"],
+ body={
+ 'output': self.final_output_collection.portable_data_hash(),
+ 'success': self.final_status == "success",
+ 'progress':1.0
+ }).execute(num_retries=self.num_retries)
def arv_executor(self, tool, job_order, **kwargs):
self.debug = kwargs.get("debug")
tool.visit(self.check_writable)
- useruuid = self.api.users().current().execute()["uuid"]
- self.project_uuid = kwargs.get("project_uuid") if kwargs.get("project_uuid") else useruuid
+ self.project_uuid = kwargs.get("project_uuid")
self.pipeline = None
make_fs_access = kwargs.get("make_fs_access") or partial(CollectionFsAccess,
api_client=self.api,
keep_client=self.keep_client)
self.fs_access = make_fs_access(kwargs["basedir"])
- if kwargs.get("create_template"):
- tmpl = RunnerTemplate(self, tool, job_order, kwargs.get("enable_reuse"))
- tmpl.save()
- # cwltool.main will write our return value to stdout.
- return tmpl.uuid
-
- if kwargs.get("create_workflow") or kwargs.get("update_workflow"):
- return upload_workflow(self, tool, job_order, self.project_uuid, kwargs.get("update_workflow"))
+ existing_uuid = kwargs.get("update_workflow")
+ if existing_uuid or kwargs.get("create_workflow"):
+ if self.work_api == "jobs":
+ tmpl = RunnerTemplate(self, tool, job_order,
+ kwargs.get("enable_reuse"),
+ uuid=existing_uuid,
+ submit_runner_ram=kwargs.get("submit_runner_ram"))
+ tmpl.save()
+ # cwltool.main will write our return value to stdout.
+ return tmpl.uuid
+ else:
+ return upload_workflow(self, tool, job_order,
+ self.project_uuid,
+ uuid=existing_uuid,
+ submit_runner_ram=kwargs.get("submit_runner_ram"))
self.ignore_docker_for_reuse = kwargs.get("ignore_docker_for_reuse")
kwargs["docker_outdir"] = "$(task.outdir)"
kwargs["tmpdir"] = "$(task.tmpdir)"
+ upload_instance(self, shortname(tool.tool["id"]), tool, job_order)
+
runnerjob = None
if kwargs.get("submit"):
if self.work_api == "containers":
self.output_callback,
**kwargs).next()
else:
- runnerjob = RunnerContainer(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name)
+ runnerjob = RunnerContainer(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name,
+ self.output_tags, submit_runner_ram=kwargs.get("submit_runner_ram"))
else:
- runnerjob = RunnerJob(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name)
+ runnerjob = RunnerJob(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name,
+ self.output_tags, submit_runner_ram=kwargs.get("submit_runner_ram"))
if not kwargs.get("submit") and "cwl_runner_job" not in kwargs and not self.work_api == "containers":
# Create pipeline for local run
logger.info("Pipeline instance %s", self.pipeline["uuid"])
if runnerjob and not kwargs.get("wait"):
- runnerjob.run()
+ runnerjob.run(wait=kwargs.get("wait"))
return runnerjob.uuid
self.poll_api = arvados.api('v1')
loopperf.__enter__()
for runnable in jobiter:
loopperf.__exit__()
+
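+ # The polling thread sets stop_polling when it hits a fatal error;
+ # stop submitting new work in that case.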
+ if self.stop_polling.is_set():
+ break
+
if runnable:
with Perf(metrics, "run"):
runnable.run(**kwargs)
if sys.exc_info()[0] is KeyboardInterrupt:
logger.error("Interrupted, marking pipeline as failed")
else:
- logger.error("Caught unhandled exception, marking pipeline as failed. Error was: %s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
+ logger.error("Execution failed: %s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
if self.pipeline:
self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
body={"state": "Failed"}).execute(num_retries=self.num_retries)
if self.final_status == "UnsupportedRequirement":
raise UnsupportedRequirement("Check log for details.")
- if self.final_status != "success":
- raise WorkflowException("Workflow failed.")
-
if self.final_output is None:
raise WorkflowException("Workflow did not return a result.")
else:
if self.output_name is None:
self.output_name = "Output of %s" % (shortname(tool.tool["id"]))
- self.make_output_collection(self.output_name, self.final_output)
+ if self.output_tags is None:
+ self.output_tags = ""
+ self.final_output, self.final_output_collection = self.make_output_collection(self.output_name, self.output_tags, self.final_output)
+ self.set_crunch_output()
+
+ if self.final_status != "success":
+ raise WorkflowException("Workflow failed.")
if kwargs.get("compute_checksum"):
adjustDirObjs(self.final_output, partial(getListing, self.fs_access))
arvpkg = pkg_resources.require("arvados-python-client")
cwlpkg = pkg_resources.require("cwltool")
- return "%s %s, %s %s, %s %s" % (sys.argv[0], arvcwlpkg[0].version,
+ return "%s %s %s, %s %s, %s %s" % (sys.argv[0], __version__, arvcwlpkg[0].version,
"arvados-python-client", arvpkg[0].version,
"cwltool", cwlpkg[0].version)
parser.add_argument("--project-uuid", type=str, metavar="UUID", help="Project that will own the workflow jobs, if not provided, will go to home project.")
parser.add_argument("--output-name", type=str, help="Name to use for collection that stores the final output.", default=None)
+ parser.add_argument("--output-tags", type=str, help="Tags for the final output collection separated by commas, e.g., '--output-tags tag0,tag1,tag2'.", default=None)
parser.add_argument("--ignore-docker-for-reuse", action="store_true",
help="Ignore Docker image version when deciding whether to reuse past jobs.",
default=False)
default=True, dest="submit")
exgroup.add_argument("--local", action="store_false", help="Run workflow on local host (submits jobs to Arvados).",
default=True, dest="submit")
- exgroup.add_argument("--create-template", action="store_true", help="Create an Arvados pipeline template.")
- exgroup.add_argument("--create-workflow", action="store_true", help="Create an Arvados workflow.")
- exgroup.add_argument("--update-workflow", type=str, metavar="UUID", help="Update existing Arvados workflow with uuid.")
+ exgroup.add_argument("--create-template", action="store_true", help="(Deprecated) synonym for --create-workflow.",
+ dest="create_workflow")
+ exgroup.add_argument("--create-workflow", action="store_true", help="Create an Arvados workflow (if using the 'containers' API) or pipeline template (if using the 'jobs' API). See --api.")
+ exgroup.add_argument("--update-workflow", type=str, metavar="UUID", help="Update an existing Arvados workflow or pipeline template with the given UUID.")
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("--wait", action="store_true", help="After submitting workflow runner job, wait for completion.",
parser.add_argument("--api", type=str,
default=None, dest="work_api",
- help="Select work submission API, one of 'jobs' or 'containers'.")
+ help="Select work submission API, one of 'jobs' or 'containers'. Default is 'jobs' if that API is available, otherwise 'containers'.")
parser.add_argument("--compute-checksum", action="store_true", default=False,
help="Compute checksum of contents while collecting outputs",
dest="compute_checksum")
+ parser.add_argument("--submit-runner-ram", type=int,
+ help="RAM (in MiB) required for the workflow runner job (default 1024)",
+ default=1024)
+
parser.add_argument("workflow", type=str, nargs="?", default=None, help="The workflow to execute")
parser.add_argument("job_order", nargs=argparse.REMAINDER, help="The input object to the workflow.")
job_order_object = None
arvargs = parser.parse_args(args)
- if (arvargs.create_template or arvargs.create_workflow or arvargs.update_workflow) and not arvargs.job_order:
+
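+ # Infer the target API from the UUID type portion: "-7fd4e-" marks a
+ # workflow (containers API), "-p5p6p-" a pipeline template (jobs API).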
+ if arvargs.update_workflow:
+ if arvargs.update_workflow.find('-7fd4e-') == 5:
+ want_api = 'containers'
+ elif arvargs.update_workflow.find('-p5p6p-') == 5:
+ want_api = 'jobs'
+ else:
+ want_api = None
+ if want_api and arvargs.work_api and want_api != arvargs.work_api:
+ logger.error('--update-workflow arg {!r} uses {!r} API, but --api={!r} specified'.format(
+ arvargs.update_workflow, want_api, arvargs.work_api))
+ return 1
+ arvargs.work_api = want_api
+
+ if (arvargs.create_workflow or arvargs.update_workflow) and not arvargs.job_order:
job_order_object = ({}, "")
add_arv_hints()
try:
if api_client is None:
api_client=arvados.api('v1', model=OrderedJsonModel())
- runner = ArvCwlRunner(api_client, work_api=arvargs.work_api, keep_client=keep_client, output_name=arvargs.output_name)
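+ # Fall back to a default KeepClient when the caller did not supply
+ # one, using the same retry count passed to ArvCwlRunner below.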
+ if keep_client is None:
+ keep_client = arvados.keep.KeepClient(api_client=api_client, num_retries=4)
+ runner = ArvCwlRunner(api_client, work_api=arvargs.work_api, keep_client=keep_client,
+ num_retries=4, output_name=arvargs.output_name,
+ output_tags=arvargs.output_tags)
except Exception as e:
logger.error(e)
return 1
arvargs.conformance_test = None
arvargs.use_container = True
+ arvargs.relax_path_checks = True
return cwltool.main.main(args=arvargs,
stdout=stdout,
makeTool=runner.arv_make_tool,
versionfunc=versionstring,
job_order_object=job_order_object,
- make_fs_access=partial(CollectionFsAccess, api_client=api_client))
+ make_fs_access=partial(CollectionFsAccess,
+ api_client=api_client,
+ keep_client=keep_client),
+ fetcher_constructor=partial(CollectionFetcher,
+ api_client=api_client,
+ keep_client=keep_client))