if not args.dry_run:
    stdoutfile = open(stdoutname, "wb")
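+ # Build the environment for the child processes: copy our own environment and
+ # overlay any "task.env" entries, running each value through parameter
+ # substitution first.  For example (hypothetical parameter values), a task.env
+ # of {"TMPDIR": "$(task.tmpdir)"} would export the expanded task temp
+ # directory to every command in the pipeline.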
+ if "task.env" in taskp:
+ env = copy.copy(os.environ)
+ for k,v in taskp["task.env"].items():
+ env[k] = subst.do_substitution(taskp, v)
+ else:
+ env = None
+
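+ # Log the assembled pipeline in shell-style notation, including any
+ # stdin/stdout redirection.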
logger.info("{}{}{}".format(' | '.join([' '.join(c) for c in cmd]), (" < " + stdinname) if stdinname is not None else "", (" > " + stdoutname) if stdoutname is not None else ""))
if args.dry_run:
# this is an intermediate command in the pipeline, so its stdout should go to a pipe
next_stdout = subprocess.PIPE
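+ # Launch this stage of the pipeline; env is either None (the child simply
+ # inherits our environment) or the copy extended above with the "task.env"
+ # overrides.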
- sp = subprocess.Popen(cmd[i], shell=False, stdin=next_stdin, stdout=next_stdout)
+ sp = subprocess.Popen(cmd[i], shell=False, stdin=next_stdin, stdout=next_stdout, env=env)
# Need to close the FDs on our side so that subcommands will get SIGPIPE if the
# consuming process ends prematurely.
logger.info("the following output files will be saved to keep:")
-subprocess.call(["find", ".", "-printf", "run-command: %12.12s %h/%f\\n"], stdout=sys.stderr, cwd=outdir)
+subprocess.call(["find", "-L", ".", "-type", "f", "-printf", "run-command: %12.12s %h/%f\\n"], stdout=sys.stderr, cwd=outdir)
logger.info("start writing output to keep")
if stat.S_ISLNK(s.st_mode):
    os.unlink(os.path.join(root, f))
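+ # vwd.checkin now returns the output collection together with any error hit
+ # while writing it to Keep; an error here must fail the task.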
-outcollection = vwd.checkin(outdir).manifest_text()
+(outcollection, checkin_error) = vwd.checkin(outdir)
- # Success if we ran any subprocess, and they all exited 0.
+ # Success if we ran any subprocess, they all exited 0, and the output was
+ # checked in to Keep without error.
-success = rcode and all(status == 0 for status in rcode.itervalues())
+success = rcode and all(status == 0 for status in rcode.itervalues()) and not checkin_error
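+ # Record the outcome on the task: the checked-in collection becomes the task
+ # output, and success reflects both the exit codes and the Keep write.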
api.job_tasks().update(uuid=arvados.current_task()['uuid'],
body={
- 'output': outcollection,
+ 'output': outcollection.manifest_text(),
'success': success,
'progress':1.0
}).execute()