- logc = arvados.collection.Collection(record["log"])
- log = logc.open(logc.keys()[0])
- tmpdir = None
- outdir = None
- keepdir = None
- for l in log:
- # Determine the tmpdir, outdir and keepdir paths from
- # the job run. Unfortunately, we can't take the first
- # values we find (which are expected to be near the
- # top) and stop scanning because if the node fails and
- # the job restarts on a different node these values
- # will different runs, and we need to know about the
- # final run that actually produced output.
-
- g = tmpdirre.match(l)
- if g:
- tmpdir = g.group(1)
- g = outdirre.match(l)
- if g:
- outdir = g.group(1)
- g = keepre.match(l)
- if g:
- keepdir = g.group(1)
-
- outputs = done.done(self, record, tmpdir, outdir, keepdir)
+ with Perf(metrics, "inspect log %s" % self.name):
+ logc = arvados.collection.CollectionReader(record["log"],
+ api_client=self.arvrunner.api,
+ keep_client=self.arvrunner.keep_client,
+ num_retries=self.arvrunner.num_retries)
+ log = logc.open(logc.keys()[0])
+ dirs = {}
+ tmpdir = None
+ outdir = None
+ keepdir = None
+ for l in log:
+ # Determine the tmpdir, outdir and keepdir paths from
+ # the job run. Unfortunately, we can't take the first
+ # values we find (which are expected to be near the
+ # top) and stop scanning because if the node fails and
+ # the job restarts on a different node these values
+ # will be different between runs, and we need to know about the
+ # final run that actually produced output.
+ g = crunchrunner_re.match(l)
+ if g:
+ dirs[g.group(1)] = g.group(2)
+
+ if processStatus == "permanentFail":
+ done.logtail(logc, logger, "%s error log:" % self.arvrunner.label(self))
+
+ with Perf(metrics, "output collection %s" % self.name):
+ outputs = done.done(self, record, dirs["tmpdir"],
+ dirs["outdir"], dirs["keep"])