#!/usr/bin/env python
+import logging
+
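+# Send all run-command messages to stderr (StreamHandler default) with a "run-command:" prefix.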
+logger = logging.getLogger('run-command')
+log_handler = logging.StreamHandler()
+log_handler.setFormatter(logging.Formatter("run-command: %(message)s"))
+logger.addHandler(log_handler)
+logger.setLevel(logging.INFO)
+
import arvados
import re
import os
import subprocess
import sys
import shutil
-import subst
+import crunchutil.subst as subst
import time
import arvados.commands.put as put
import signal
import stat
import copy
+import traceback
+import pprint
+import multiprocessing
+import crunchutil.robust_put as robust_put
+import crunchutil.vwd as vwd
os.umask(0077)
os.chdir("output")
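+# Remember the task output directory; it backs the "task.outdir" substitution and is uploaded to Keep at the end.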
+outdir = os.getcwd()
+
taskp = None
jobp = arvados.current_job()['script_parameters']
if len(arvados.current_task()['parameters']) > 0:
- p = arvados.current_task()['parameters']
+ taskp = arvados.current_task()['parameters']
links = []
-def sub_link(v):
- r = os.path.basename(v)
- os.symlink(os.path.join(os.environ['TASK_KEEPMOUNT'], v) , r)
- links.append(r)
- return r
-
def sub_tmpdir(v):
    return os.path.join(arvados.current_task().tmpdir, 'tmpdir')
+def sub_outdir(v):
+ return outdir
+
def sub_cores(v):
- return os.environ['CRUNCH_NODE_SLOTS']
+ return str(multiprocessing.cpu_count())
def sub_jobid(v):
    return os.environ['JOB_UUID']
def sub_taskid(v):
    return os.environ['TASK_UUID']
-subst.default_subs["link "] = sub_link
+def sub_jobsrc(v):
+ return os.environ['CRUNCH_SRC']
+
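+# Substitution keywords that crunchutil.subst expands in command arguments and parameter values.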
subst.default_subs["task.tmpdir"] = sub_tmpdir
+subst.default_subs["task.outdir"] = sub_outdir
+subst.default_subs["job.srcdir"] = sub_jobsrc
subst.default_subs["node.cores"] = sub_cores
-subst.default_subs["job.id"] = sub_jobid
-subst.default_subs["task.id"] = sub_taskid
-
-rcode = 1
-
-def machine_progress(bytes_written, bytes_expected):
- return "run-command: wrote {} total {}\n".format(
- bytes_written, -1 if (bytes_expected is None) else bytes_expected)
+subst.default_subs["job.uuid"] = sub_jobid
+subst.default_subs["task.uuid"] = sub_taskid
class SigHandler(object):
    def __init__(self):
            return r
    elif isinstance(c, list):
        return expand_list(p, c)
- elif isinstance(c, str):
+ elif isinstance(c, str) or isinstance(c, unicode):
        return [subst.do_substitution(p, c)]
    return []
    fn = subst.do_substitution(p, value)
    mode = os.stat(fn).st_mode
    prefix = fn[len(os.environ['TASK_KEEPMOUNT'])+1:]
- if mode != None:
+ if mode is not None:
        if stat.S_ISDIR(mode):
- items = ["$(dir %s/%s)" % (prefix, l) for l in os.listdir(fn)]
+ items = ["$(dir %s/%s/)" % (prefix, l) for l in os.listdir(fn)]
        elif stat.S_ISREG(mode):
            with open(fn) as f:
                items = [line for line in f]
        return items
    else:
        return None
-if "task.foreach" in jobp:
- if arvados.current_task()['sequence'] == 0:
- var = jobp["task.foreach"]
- items = get_items(jobp, jobp[var])
- if items != None:
- print("run-command: parallelizing on %s with items %s" % (var, items))
-
- for i in items:
- params = copy.copy(jobp)
- params[var] = i
- arvados.api().job_tasks().create(body={
- 'job_uuid': arvados.current_job()['uuid'],
- 'created_by_job_task_uuid': arvados.current_task()['uuid'],
- 'sequence': 1,
- 'parameters': params
- }
- ).execute()
- sys.exit(0)
- else:
- sys.exit(1)
-else:
- p = jobp
+stdoutname = None
+stdoutfile = None
+stdinname = None
+stdinfile = None
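+# Assume failure (rcode 1) unless the command actually runs and reports its own exit code.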
+rcode = 1
try:
- cmd = expand_list(p, p["command"])
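+    # The first task (sequence 0) fans out: queue one sequence-1 subtask per item of the "task.foreach" parameter, then exit.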
+ if "task.foreach" in jobp:
+ if arvados.current_task()['sequence'] == 0:
+ var = jobp["task.foreach"]
+ items = get_items(jobp, jobp[var])
+ logger.info("parallelizing on %s with items %s" % (var, items))
+ if items is not None:
+ for i in items:
+ params = copy.copy(jobp)
+ params[var] = i
+ arvados.api().job_tasks().create(body={
+ 'job_uuid': arvados.current_job()['uuid'],
+ 'created_by_job_task_uuid': arvados.current_task()['uuid'],
+ 'sequence': 1,
+ 'parameters': params
+ }
+ ).execute()
+ if "task.vwd" in jobp:
+ # Base vwd collection will be merged with output fragments from
+ # the other tasks by crunch.
+ arvados.current_task().set_output(subst.do_substitution(jobp, jobp["task.vwd"]))
+ else:
+ arvados.current_task().set_output(None)
+ sys.exit(0)
+ else:
+ sys.exit(1)
+ else:
+ taskp = jobp
+
+ if "task.vwd" in taskp:
+ # Populate output directory with symlinks to files in collection
+ vwd.checkout(subst.do_substitution(taskp, taskp["task.vwd"]), outdir)
+
+ if "task.cwd" in taskp:
+ os.chdir(subst.do_substitution(taskp, taskp["task.cwd"]))
+
+ cmd = expand_list(taskp, taskp["command"])
- stdoutname = None
- stdoutfile = None
- if "save.stdout" in p:
- stdoutname = subst.do_substitution(p, p["save.stdout"])
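+    # Optional redirection: "task.stdin" and "task.stdout" name files to attach to the command's stdin and stdout.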
+ if "task.stdin" in taskp:
+ stdinname = subst.do_substitution(taskp, taskp["task.stdin"])
+ stdinfile = open(stdinname, "rb")
+
+ if "task.stdout" in taskp:
+ stdoutname = subst.do_substitution(taskp, taskp["task.stdout"])
        stdoutfile = open(stdoutname, "wb")
- print("run-command: {}{}".format(' '.join(cmd), (" > " + stdoutname) if stdoutname != None else ""))
+ logger.info("{}{}{}".format(' '.join(cmd), (" < " + stdinname) if stdinname is not None else "", (" > " + stdoutname) if stdoutname is not None else ""))
+except subst.SubstitutionError as e:
+ logger.error(str(e))
+ logger.error("task parameters were:")
+ logger.error(pprint.pformat(taskp))
+ sys.exit(1)
+except Exception as e:
+ logger.exception("caught exception")
+ logger.error("task parameters were:")
+ logger.error(pprint.pformat(taskp))
+ sys.exit(1)
- sp = subprocess.Popen(cmd, shell=False, stdout=stdoutfile)
+try:
+ sp = subprocess.Popen(cmd, shell=False, stdin=stdinfile, stdout=stdoutfile)
    sig = SigHandler()
    # forward signals to the process.
    # wait for process to complete.
    rcode = sp.wait()
- if sig.sig != None:
- print("run-command: terminating on signal %s" % sig.sig)
+ if sig.sig is not None:
+ logger.critical("terminating on signal %s" % sig.sig)
        sys.exit(2)
    else:
- print("run-command: completed with exit code %i (%s)" % (rcode, "success" if rcode == 0 else "failed"))
+ logger.info("completed with exit code %i (%s)" % (rcode, "success" if rcode == 0 else "failed"))
except Exception as e:
- print("run-command: caught exception: {}".format(e))
+ logger.exception("caught exception")
# restore default signal handlers.
signal.signal(signal.SIGINT, signal.SIG_DFL)
for l in links:
    os.unlink(l)
-print("run-command: the following output files will be saved to keep:")
-
-subprocess.call(["find", ".", "-type", "f", "-printf", "run-command: %12.12s %h/%f\\n"])
-
-print("run-command: start writing output to keep")
-
-done = False
-resume_cache = put.ResumeCache(os.path.join(arvados.current_task().tmpdir, "upload-output-checkpoint"))
-reporter = put.progress_writer(machine_progress)
-bytes_expected = put.expected_bytes_for(".")
-while not done:
- try:
- out = put.ArvPutCollectionWriter.from_cache(resume_cache, reporter, bytes_expected)
- out.do_queued_work()
- out.write_directory_tree(".", max_manifest_depth=0)
- outuuid = out.finish()
- api.job_tasks().update(uuid=arvados.current_task()['uuid'],
- body={
- 'output':outuuid,
- 'success': (rcode == 0),
- 'progress':1.0
- }).execute()
- done = True
- except KeyboardInterrupt:
- print("run-command: terminating on signal 2")
- sys.exit(2)
- except Exception as e:
- print("run-command: caught exception: {}".format(e))
- time.sleep(5)
+logger.info("the following output files will be saved to keep:")
+
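+# Print the output file listing (size and path) to stderr so it appears alongside the other log messages.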
+subprocess.call(["find", ".", "-type", "f", "-printf", "run-command: %12.12s %h/%f\\n"], stdout=sys.stderr)
+
+logger.info("start writing output to keep")
+
+if "task.vwd" in taskp:
+ if "task.foreach" in jobp:
+        # This is a subtask, so don't merge with the original collection; that will happen at the end
+ outcollection = vwd.checkin(subst.do_substitution(taskp, taskp["task.vwd"]), outdir, merge=False).manifest_text()
+ else:
+ # Just a single task, so do merge with the original collection
+ outcollection = vwd.checkin(subst.do_substitution(taskp, taskp["task.vwd"]), outdir, merge=True).manifest_text()
+else:
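+    # No virtual working directory: upload everything under the output directory to Keep with crunchutil.robust_put.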
+ outcollection = robust_put.upload(outdir, logger)
+
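+# Record the output collection and the command's success or failure on this task.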
+api.job_tasks().update(uuid=arvados.current_task()['uuid'],
+ body={
+ 'output': outcollection,
+ 'success': (rcode == 0),
+ 'progress':1.0
+ }).execute()
sys.exit(rcode)