diff --git a/tools/crunchstat-summary/crunchstat_summary/summarizer.py b/tools/crunchstat-summary/crunchstat_summary/summarizer.py
index 9b8410e9aafcb9a6b22fbe8aa21a0aa4bb0a27d7..463c552c4f1eb5caf0868337858197a747bc8fa8 100644
--- a/tools/crunchstat-summary/crunchstat_summary/summarizer.py
+++ b/tools/crunchstat-summary/crunchstat_summary/summarizer.py
@@ -2,11 +2,9 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-from __future__ import print_function
-
 import arvados
 import collections
-import crunchstat_summary.chartjs
+import crunchstat_summary.dygraphs
 import crunchstat_summary.reader
 import datetime
 import functools
@@ -24,23 +22,28 @@ from crunchstat_summary import logger
 # number of GiB. (Actual nodes tend to be sold in sizes like 8 GiB
 # that have amounts like 7.5 GiB according to the kernel.)
 AVAILABLE_RAM_RATIO = 0.95
-
+MB = 2**20  # bytes per MiB (binary megabyte)
 
 # Workaround datetime.datetime.strptime() thread-safety bug by calling
 # it once before starting threads.  https://bugs.python.org/issue7980
 datetime.datetime.strptime('1999-12-31_23:59:59', '%Y-%m-%d_%H:%M:%S')
 
 
+WEBCHART_CLASS = crunchstat_summary.dygraphs.DygraphsChart
+
+
 class Task(object):
     def __init__(self):
         self.starttime = None
+        self.finishtime = None
         self.series = collections.defaultdict(list)
 
 
 class Summarizer(object):
-    def __init__(self, logdata, label=None, skip_child_jobs=False):
+    def __init__(self, logdata, label=None, skip_child_jobs=False, uuid=None, **kwargs):
         self._logdata = logdata
 
+        self.uuid = uuid
         self.label = label
         self.starttime = None
         self.finishtime = None
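
(A minimal sketch of the warm-up pattern used earlier in this hunk --
assuming the behavior described in https://bugs.python.org/issue7980,
where the first strptime() call lazily imports _strptime, so concurrent
first calls from multiple threads can fail:)

import datetime
import threading

def parse_one():
    datetime.datetime.strptime('1999-12-31_23:59:59', '%Y-%m-%d_%H:%M:%S')

parse_one()  # one call on the main thread forces the lazy _strptime import
workers = [threading.Thread(target=parse_one) for _ in range(4)]
for t in workers:
    t.start()
for t in workers:
    t.join()
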
@@ -66,129 +69,176 @@ class Summarizer(object):
 
     def run(self):
         logger.debug("%s: parsing logdata %s", self.label, self._logdata)
-        for line in self._logdata:
-            m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) job_task (?P<task_uuid>\S+)$', line)
-            if m:
-                seq = int(m.group('seq'))
-                uuid = m.group('task_uuid')
-                self.seq_to_uuid[seq] = uuid
-                logger.debug('%s: seq %d is task %s', self.label, seq, uuid)
-                continue
-
-            m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) (success in|failure \(#., permanent\) after) (?P<elapsed>\d+) seconds', line)
-            if m:
-                task_id = self.seq_to_uuid[int(m.group('seq'))]
-                elapsed = int(m.group('elapsed'))
-                self.task_stats[task_id]['time'] = {'elapsed': elapsed}
-                if elapsed > self.stats_max['time']['elapsed']:
-                    self.stats_max['time']['elapsed'] = elapsed
-                continue
-
-            m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) stderr Queued job (?P<uuid>\S+)$', line)
-            if m:
-                uuid = m.group('uuid')
-                if self._skip_child_jobs:
-                    logger.warning('%s: omitting stats from child job %s'
-                                   ' because --skip-child-jobs flag is on',
-                                   self.label, uuid)
+        with self._logdata as logdata:
+            self._run(logdata)
+
+    def _run(self, logdata):
+        self.detected_crunch1 = False
+        for line in logdata:
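+            # '-8i9sb-' is the uuid infix for crunch1 jobs; a line
+            # mentioning one means this log came from crunch1.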
+            if not self.detected_crunch1 and '-8i9sb-' in line:
+                self.detected_crunch1 = True
+
+            if self.detected_crunch1:
+                m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) job_task (?P<task_uuid>\S+)$', line)
+                if m:
+                    seq = int(m.group('seq'))
+                    uuid = m.group('task_uuid')
+                    self.seq_to_uuid[seq] = uuid
+                    logger.debug('%s: seq %d is task %s', self.label, seq, uuid)
                     continue
-                logger.debug('%s: follow %s', self.label, uuid)
-                child_summarizer = JobSummarizer(uuid)
-                child_summarizer.stats_max = self.stats_max
-                child_summarizer.task_stats = self.task_stats
-                child_summarizer.tasks = self.tasks
-                child_summarizer.starttime = self.starttime
-                child_summarizer.run()
-                logger.debug('%s: done %s', self.label, uuid)
-                continue
 
-            m = re.search(r'^(?P<timestamp>[^\s.]+)(\.\d+)? (?P<job_uuid>\S+) \d+ (?P<seq>\d+) stderr crunchstat: (?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n', line)
-            if not m:
-                continue
+                m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) (success in|failure \(#., permanent\) after) (?P<elapsed>\d+) seconds', line)
+                if m:
+                    task_id = self.seq_to_uuid[int(m.group('seq'))]
+                    elapsed = int(m.group('elapsed'))
+                    self.task_stats[task_id]['time'] = {'elapsed': elapsed}
+                    if elapsed > self.stats_max['time']['elapsed']:
+                        self.stats_max['time']['elapsed'] = elapsed
+                    continue
 
-            try:
-                if self.label is None:
-                    self.label = m.group('job_uuid')
-                    logger.debug('%s: using job uuid as label', self.label)
-                if m.group('category').endswith(':'):
-                    # "stderr crunchstat: notice: ..."
+                m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) stderr Queued job (?P<uuid>\S+)$', line)
+                if m:
+                    uuid = m.group('uuid')
+                    if self._skip_child_jobs:
+                        logger.warning('%s: omitting stats from child job %s'
+                                       ' because --skip-child-jobs flag is on',
+                                       self.label, uuid)
+                        continue
+                    logger.debug('%s: follow %s', self.label, uuid)
+                    child_summarizer = NewSummarizer(uuid)
+                    child_summarizer.stats_max = self.stats_max
+                    child_summarizer.task_stats = self.task_stats
+                    child_summarizer.tasks = self.tasks
+                    child_summarizer.starttime = self.starttime
+                    child_summarizer.run()
+                    logger.debug('%s: done %s', self.label, uuid)
                     continue
-                elif m.group('category') in ('error', 'caught'):
+
+                # 2017-12-02_17:15:08 e51c5-8i9sb-mfp68stkxnqdd6m 63676 0 stderr crunchstat: keepcalls 0 put 2576 get -- interval 10.0000 seconds 0 put 2576 get
+                m = re.search(r'^(?P<timestamp>[^\s.]+)(\.\d+)? (?P<job_uuid>\S+) \d+ (?P<seq>\d+) stderr (?P<crunchstat>crunchstat: )(?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n$', line)
+                if not m:
                     continue
-                elif m.group('category') == 'read':
-                    # "stderr crunchstat: read /proc/1234/net/dev: ..."
-                    # (crunchstat formatting fixed, but old logs still say this)
+            else:
+                # crunch2
+                # 2017-12-01T16:56:24.723509200Z crunchstat: keepcalls 0 put 3 get -- interval 10.0000 seconds 0 put 3 get
+                m = re.search(r'^(?P<timestamp>\S+) (?P<crunchstat>crunchstat: )?(?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n$', line)
+                if not m:
                     continue
+
+            if self.label is None:
+                try:
+                    self.label = m.group('job_uuid')
+                except IndexError:
+                    self.label = 'label #1'
+            category = m.group('category')
+            if category.endswith(':'):
+                # "stderr crunchstat: notice: ..."
+                continue
+            elif category in ('error', 'caught'):
+                continue
+            elif category in ('read', 'open', 'cgroup', 'CID', 'Running'):
+                # "stderr crunchstat: read /proc/1234/net/dev: ..."
+                # (old logs are less careful with unprefixed error messages)
+                continue
+
+            if self.detected_crunch1:
                 task_id = self.seq_to_uuid[int(m.group('seq'))]
-                task = self.tasks[task_id]
+            else:
+                task_id = 'container'
+            task = self.tasks[task_id]
 
-                # Use the first and last crunchstat timestamps as
-                # approximations of starttime and finishtime.
+            # Use the first and last crunchstat timestamps as
+            # approximations of starttime and finishtime.
+            timestamp = m.group('timestamp')
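+            # Sniff the date/time separator at offset 10 to tell the two
+            # formats apart: crunch1's 2017-12-02_17:15:08 vs crunch2's
+            # ISO 8601 2017-12-01T16:56:24.723509200Z (fractional seconds
+            # and timezone are dropped below).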
+            if timestamp[10:11] == '_':
                 timestamp = datetime.datetime.strptime(
-                    m.group('timestamp'), '%Y-%m-%d_%H:%M:%S')
-                if not task.starttime:
-                    task.starttime = timestamp
-                    logger.debug('%s: task %s starttime %s',
-                                 self.label, task_id, timestamp)
+                    timestamp, '%Y-%m-%d_%H:%M:%S')
+            elif timestamp[10:11] == 'T':
+                timestamp = datetime.datetime.strptime(
+                    timestamp[:19], '%Y-%m-%dT%H:%M:%S')
+            else:
+                raise ValueError("Cannot parse timestamp {!r}".format(
+                    timestamp))
+
+            if task.starttime is None:
+                logger.debug('%s: task %s starttime %s',
+                             self.label, task_id, timestamp)
+            if task.starttime is None or timestamp < task.starttime:
+                task.starttime = timestamp
+            if task.finishtime is None or timestamp > task.finishtime:
                 task.finishtime = timestamp
 
-                if not self.starttime:
-                    self.starttime = timestamp
+            if self.starttime is None or timestamp < self.starttime:
+                self.starttime = timestamp
+            if self.finishtime is None or timestamp > self.finishtime:
                 self.finishtime = timestamp
 
-                this_interval_s = None
-                for group in ['current', 'interval']:
-                    if not m.group(group):
-                        continue
-                    category = m.group('category')
-                    words = m.group(group).split(' ')
-                    stats = {}
+            if (not self.detected_crunch1) and task.starttime is not None and task.finishtime is not None:
+                # .seconds would drop whole days; use total_seconds()
+                elapsed = int((task.finishtime - task.starttime).total_seconds())
+                self.task_stats[task_id]['time'] = {'elapsed': elapsed}
+                if elapsed > self.stats_max['time']['elapsed']:
+                    self.stats_max['time']['elapsed'] = elapsed
+
+            this_interval_s = None
+            for group in ['current', 'interval']:
+                if not m.group(group):
+                    continue
+                category = m.group('category')
+                words = m.group(group).split(' ')
+                stats = {}
+                try:
                     for val, stat in zip(words[::2], words[1::2]):
-                        try:
-                            if '.' in val:
-                                stats[stat] = float(val)
-                            else:
-                                stats[stat] = int(val)
-                        except ValueError as e:
-                            raise ValueError(
-                                'Error parsing {} stat: {!r}'.format(
-                                    stat, e))
-                    if 'user' in stats or 'sys' in stats:
-                        stats['user+sys'] = stats.get('user', 0) + stats.get('sys', 0)
-                    if 'tx' in stats or 'rx' in stats:
-                        stats['tx+rx'] = stats.get('tx', 0) + stats.get('rx', 0)
-                    for stat, val in stats.iteritems():
-                        if group == 'interval':
-                            if stat == 'seconds':
-                                this_interval_s = val
-                                continue
-                            elif not (this_interval_s > 0):
-                                logger.error(
-                                    "BUG? interval stat given with duration {!r}".
-                                    format(this_interval_s))
-                                continue
-                            else:
-                                stat = stat + '__rate'
-                                val = val / this_interval_s
-                                if stat in ['user+sys__rate', 'tx+rx__rate']:
-                                    task.series[category, stat].append(
-                                        (timestamp - self.starttime, val))
+                        if '.' in val:
+                            stats[stat] = float(val)
                         else:
-                            if stat in ['rss']:
+                            stats[stat] = int(val)
+                except ValueError as e:
+                    # If the line doesn't start with 'crunchstat:' we
+                    # might have mistaken an error message for a
+                    # structured crunchstat line.
+                    if m.group("crunchstat") is None or m.group("category") == "crunchstat":
+                        logger.warning("%s: log contains message\n  %s", self.label, line)
+                    else:
+                        logger.warning(
+                            '%s: Error parsing value %r (stat %r, category %r): %r',
+                            self.label, val, stat, category, e)
+                        logger.warning('%s', line)
+                    continue
+                if 'user' in stats or 'sys' in stats:
+                    stats['user+sys'] = stats.get('user', 0) + stats.get('sys', 0)
+                if 'tx' in stats or 'rx' in stats:
+                    stats['tx+rx'] = stats.get('tx', 0) + stats.get('rx', 0)
+                if group == 'interval':
+                    if 'seconds' in stats:
+                        this_interval_s = stats['seconds']
+                        del stats['seconds']
+                        if this_interval_s <= 0:
+                            logger.error(
+                                "BUG? interval stat given with duration {!r}".
+                                format(this_interval_s))
+                    else:
+                        logger.error('BUG? interval stat missing duration')
+                for stat, val in stats.items():
+                    if group == 'interval' and this_interval_s:
+                        stat = stat + '__rate'
+                        val = val / this_interval_s
+                        if stat in ['user+sys__rate', 'user__rate', 'sys__rate', 'tx+rx__rate', 'rx__rate', 'tx__rate']:
                                 task.series[category, stat].append(
                                     (timestamp - self.starttime, val))
-                            self.task_stats[task_id][category][stat] = val
-                        if val > self.stats_max[category][stat]:
-                            self.stats_max[category][stat] = val
-            except Exception as e:
-                logger.info('Skipping malformed line: {}Error was: {}\n'.format(line, e))
+                    else:
+                        if stat in ['rss', 'used', 'total']:
+                            task.series[category, stat].append(
+                                (timestamp - self.starttime, val))
+                        self.task_stats[task_id][category][stat] = val
+                    if val > self.stats_max[category][stat]:
+                        self.stats_max[category][stat] = val
         logger.debug('%s: done parsing', self.label)
 
         self.job_tot = collections.defaultdict(
             functools.partial(collections.defaultdict, int))
-        for task_id, task_stat in self.task_stats.iteritems():
-            for category, stat_last in task_stat.iteritems():
-                for stat, val in stat_last.iteritems():
+        for task_id, task_stat in self.task_stats.items():
+            for category, stat_last in task_stat.items():
+                for stat, val in stat_last.items():
                     if stat in ['cpus', 'cache', 'swap', 'rss']:
                         # meaningless stats like 16 cpu cores x 5 tasks = 80
                         continue
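
(To make the parsing concrete: a standalone sketch using the crunch2
sample line quoted in the code above. The regex splits a crunchstat
report into category / current / interval, and stat values pair with
stat names by zipping even- and odd-indexed words:)

import re

line = ('2017-12-01T16:56:24.723509200Z crunchstat: keepcalls 0 put 3 get'
        ' -- interval 10.0000 seconds 0 put 3 get\n')
m = re.search(r'^(?P<timestamp>\S+) (?P<crunchstat>crunchstat: )?'
              r'(?P<category>\S+) (?P<current>.*?)'
              r'( -- interval (?P<interval>.*))?\n$', line)
words = m.group('interval').split(' ')
stats = {stat: float(val) if '.' in val else int(val)
         for val, stat in zip(words[::2], words[1::2])}
print(m.group('category'), stats)
# keepcalls {'seconds': 10.0, 'put': 0, 'get': 3} -- 'seconds' is the
# interval duration; put/get divided by it become the __rate series.
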
@@ -197,6 +247,8 @@ class Summarizer(object):
 
     def long_label(self):
         label = self.label
+        if hasattr(self, 'process') and self.process['uuid'] not in label:
+            label = '{} ({})'.format(label, self.process['uuid'])
         if self.finishtime:
             label += ' -- elapsed time '
             s = (self.finishtime - self.starttime).total_seconds()
@@ -217,12 +269,12 @@ class Summarizer(object):
             self._recommend_gen())) + "\n"
 
     def html_report(self):
-        return crunchstat_summary.chartjs.ChartJS(self.label, [self]).html()
+        return WEBCHART_CLASS(self.label, [self]).html()
 
     def _text_report_gen(self):
         yield "\t".join(['category', 'metric', 'task_max', 'task_max_rate', 'job_total'])
-        for category, stat_max in sorted(self.stats_max.iteritems()):
-            for stat, val in sorted(stat_max.iteritems()):
+        for category, stat_max in sorted(self.stats_max.items()):
+            for stat, val in sorted(stat_max.items()):
                 if stat.endswith('__rate'):
                     continue
                 max_rate = self._format(stat_max.get(stat+'__rate', '-'))
@@ -240,7 +292,7 @@ class Summarizer(object):
                  self.stats_max['cpu']['user+sys__rate'],
                  lambda x: x * 100),
                 ('Overall CPU usage: {}%',
-                 self.job_tot['cpu']['user+sys'] /
+                 float(self.job_tot['cpu']['user+sys']) /
                  self.job_tot['time']['elapsed']
                  if self.job_tot['time']['elapsed'] > 0 else 0,
                  lambda x: x * 100),
@@ -264,7 +316,13 @@ class Summarizer(object):
                  (float(self.job_tot['blkio:0:0']['read']) /
                  float(self.job_tot['net:keep0']['rx']))
                  if self.job_tot['net:keep0']['rx'] > 0 else 0,
-                 lambda x: x * 100.0)):
+                 lambda x: x * 100.0),
+               ('Temp disk utilization {}%',
+                 (float(self.job_tot['statfs']['used']) /
+                 float(self.job_tot['statfs']['total']))
+                 if self.job_tot['statfs']['total'] > 0 else 0,
+                 lambda x: x * 100.0),
+                ):
             format_string, val, transform = args
             if val == float('-Inf'):
                 continue
@@ -273,29 +331,40 @@ class Summarizer(object):
             yield "# "+format_string.format(self._format(val))
 
     def _recommend_gen(self):
+        # TODO recommend fixing job granularity if elapsed time is too short
         return itertools.chain(
             self._recommend_cpu(),
             self._recommend_ram(),
-            self._recommend_keep_cache())
+            self._recommend_keep_cache(),
+            self._recommend_temp_disk(),
+            )
 
     def _recommend_cpu(self):
         """Recommend asking for 4 cores if max CPU usage was 333%"""
 
+        constraint_key = self._map_runtime_constraint('vcpus')
         cpu_max_rate = self.stats_max['cpu']['user+sys__rate']
-        if cpu_max_rate == float('-Inf'):
+        if cpu_max_rate == float('-Inf') or cpu_max_rate == 0.0:
             logger.warning('%s: no CPU usage data', self.label)
             return
+        # TODO: Don't base the recommendation on an isolated peak alone;
+        # also consider average CPU usage, or the fraction of time at max.
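+        # Worked example: a 333% peak gives cpu_max_rate == 3.33, so
+        # used_cores == max(1, ceil(3.33)) == 4, per the docstring above.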
         used_cores = max(1, int(math.ceil(cpu_max_rate)))
-        asked_cores = self.existing_constraints.get('min_cores_per_node')
-        if asked_cores is None or used_cores < asked_cores:
+        asked_cores = self.existing_constraints.get(constraint_key)
+        if asked_cores is None:
+            asked_cores = 1
+        # TODO: This should be more nuanced in cases where max >> avg
+        if used_cores < asked_cores:
             yield (
                 '#!! {} max CPU usage was {}% -- '
-                'try runtime_constraints "min_cores_per_node":{}'
+                'try reducing runtime_constraints to "{}":{}'
             ).format(
                 self.label,
-                int(math.ceil(cpu_max_rate*100)),
+                math.ceil(cpu_max_rate*100),
+                constraint_key,
                 int(used_cores))
 
+    # FIXME: This needs to be updated to account for current a-d-c algorithms
     def _recommend_ram(self):
         """Recommend an economical RAM constraint for this job.
 
@@ -330,40 +399,60 @@ class Summarizer(object):
         the memory we want -- even if that happens to be 8192 MiB.
         """
 
+        constraint_key = self._map_runtime_constraint('ram')
         used_bytes = self.stats_max['mem']['rss']
         if used_bytes == float('-Inf'):
             logger.warning('%s: no memory usage data', self.label)
             return
-        used_mib = math.ceil(float(used_bytes) / 1048576)
-        asked_mib = self.existing_constraints.get('min_ram_mb_per_node')
+        used_mib = math.ceil(float(used_bytes) / MB)
+        asked_mib = self.existing_constraints.get(constraint_key)
 
         nearlygibs = lambda mebibytes: mebibytes/AVAILABLE_RAM_RATIO/1024
-        if asked_mib is None or (
-                math.ceil(nearlygibs(used_mib)) < nearlygibs(asked_mib)):
+        if used_mib > 0 and (asked_mib is None or (
+                math.ceil(nearlygibs(used_mib)) < nearlygibs(asked_mib))):
             yield (
                 '#!! {} max RSS was {} MiB -- '
-                'try runtime_constraints "min_ram_mb_per_node":{}'
+                'try reducing runtime_constraints to "{}":{}'
             ).format(
                 self.label,
                 int(used_mib),
-                int(math.ceil(nearlygibs(used_mib))*AVAILABLE_RAM_RATIO*1024))
+                constraint_key,
+                int(math.ceil(nearlygibs(used_mib))*AVAILABLE_RAM_RATIO*1024*MB/self._runtime_constraint_mem_unit()))
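+        # Worked example: used_mib == 3996 -> nearlygibs(3996) ~= 4.11,
+        # ceil -> 5 "nearly GiB", so the suggestion is 5*0.95*1024 ==
+        # 4864 MiB (or the same amount in bytes for containers).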
 
     def _recommend_keep_cache(self):
         """Recommend increasing keep cache if utilization < 80%"""
+        constraint_key = self._map_runtime_constraint('keep_cache_ram')
         if self.job_tot['net:keep0']['rx'] == 0:
             return
         utilization = (float(self.job_tot['blkio:0:0']['read']) /
                        float(self.job_tot['net:keep0']['rx']))
-        asked_mib = self.existing_constraints.get('keep_cache_mb_per_task', 256)
+        # FIXME: the hard-coded 256 default is in MiB, so it only suits
+        # crunch1 jobs; for container requests the unit is bytes.
+        asked_cache = self.existing_constraints.get(constraint_key, 256) * self._runtime_constraint_mem_unit()
 
         if utilization < 0.8:
             yield (
                 '#!! {} Keep cache utilization was {:.2f}% -- '
-                'try runtime_constraints "keep_cache_mb_per_task":{} (or more)'
+                'try doubling runtime_constraints to "{}":{} (or more)'
             ).format(
                 self.label,
                 utilization * 100.0,
-                asked_mib*2)
+                constraint_key,
+                math.ceil(asked_cache * 2 / self._runtime_constraint_mem_unit()))
+
+
+    def _recommend_temp_disk(self):
+        """Recommend decreasing temp disk if utilization < 50%"""
+        total = float(self.job_tot['statfs']['total'])
+        utilization = (float(self.job_tot['statfs']['used']) / total) if total > 0 else 0.0
+
+        if utilization < 0.5 and total > 0:
+            yield (
+                '#!! {} max temp disk utilization was {:.0f}% of {:.0f} MiB -- '
+                'consider reducing "tmpdirMin" and/or "outdirMin"'
+            ).format(
+                self.label,
+                utilization * 100.0,
+                total / MB)
 
 
     def _format(self, val):
@@ -375,6 +464,22 @@ class Summarizer(object):
         else:
             return '{}'.format(val)
 
+    def _runtime_constraint_mem_unit(self):
+        if hasattr(self, 'runtime_constraint_mem_unit'):
+            return self.runtime_constraint_mem_unit
+        elif self.detected_crunch1:
+            return JobSummarizer.runtime_constraint_mem_unit
+        else:
+            return ContainerRequestSummarizer.runtime_constraint_mem_unit
+
+    def _map_runtime_constraint(self, key):
+        if hasattr(self, 'map_runtime_constraint'):
+            return self.map_runtime_constraint[key]
+        elif self.detected_crunch1:
+            return JobSummarizer.map_runtime_constraint[key]
+        else:
+            return key
+
 
 class CollectionSummarizer(Summarizer):
     def __init__(self, collection_id, **kwargs):
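
(How the two helpers above combine, given the class attributes defined
further down: JobSummarizer maps to MiB-denominated crunch1 constraint
names, while ContainerRequestSummarizer keeps crunch2 names in bytes.
recommended_ram() is a hypothetical caller, not part of the module:)

def recommended_ram(summarizer, mib):
    key = summarizer._map_runtime_constraint('ram')
    val = mib * MB // summarizer._runtime_constraint_mem_unit()
    # crunch1 job:        ('min_ram_mb_per_node', 4864)
    # container request:  ('ram', 5100273664)  # same 4864 MiB, in bytes
    return key, val
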
@@ -383,53 +488,98 @@ class CollectionSummarizer(Summarizer):
         self.label = collection_id
 
 
-class JobSummarizer(Summarizer):
-    def __init__(self, job, **kwargs):
-        arv = arvados.api('v1')
-        if isinstance(job, basestring):
-            self.job = arv.jobs().get(uuid=job).execute()
-        else:
-            self.job = job
+def NewSummarizer(process_or_uuid, **kwargs):
+    """Construct with the appropriate subclass for this uuid/object."""
+
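+    # Arvados uuids carry the object type in their middle segment:
+    # '-8i9sb-' jobs, '-d1hrv-' pipeline instances, '-xvhdp-' container
+    # requests, '-dz642-' containers, '-4zz18-' collections.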
+    if isinstance(process_or_uuid, dict):
+        process = process_or_uuid
+        uuid = process['uuid']
+    else:
+        uuid = process_or_uuid
+        process = None
+        arv = arvados.api('v1', model=OrderedJsonModel())
+
+    if '-dz642-' in uuid:
+        if process is None:
+            # Get an associated container request; any one will do,
+            # since they all share the same container logs.
+            crs = arv.container_requests().list(filters=[['container_uuid','=',uuid]],limit=1).execute()['items']
+            if len(crs) > 0:
+                process = crs[0]
+        klass = ContainerRequestTreeSummarizer
+    elif '-xvhdp-' in uuid:
+        if process is None:
+            process = arv.container_requests().get(uuid=uuid).execute()
+        klass = ContainerRequestTreeSummarizer
+    elif '-8i9sb-' in uuid:
+        if process is None:
+            process = arv.jobs().get(uuid=uuid).execute()
+        klass = JobTreeSummarizer
+    elif '-d1hrv-' in uuid:
+        if process is None:
+            process = arv.pipeline_instances().get(uuid=uuid).execute()
+        klass = PipelineSummarizer
+    elif '-4zz18-' in uuid:
+        return CollectionSummarizer(collection_id=uuid)
+    else:
+        raise ValueError("Unrecognized uuid %r" % uuid)
+    return klass(process, uuid=uuid, **kwargs)
+
+
+class ProcessSummarizer(Summarizer):
+    """Process is a job, pipeline, or container request."""
+
+    def __init__(self, process, label=None, **kwargs):
         rdr = None
-        if self.job.get('log'):
+        self.process = process
+        if label is None:
+            label = self.process.get('name', self.process['uuid'])
+        # Pre-Arvados v1.4 everything is in 'log'
+        # For 1.4+ containers have no logs and container_requests have them in 'log_uuid', not 'log'
+        log_collection = self.process.get('log', self.process.get('log_uuid'))
+        if log_collection and self.process.get('state') != 'Uncommitted':  # i.e. arvados.util.CR_UNCOMMITTED
             try:
-                rdr = crunchstat_summary.reader.CollectionReader(self.job['log'])
+                rdr = crunchstat_summary.reader.CollectionReader(log_collection)
             except arvados.errors.NotFoundError as e:
                 logger.warning("Trying event logs after failing to read "
-                               "log collection %s: %s", self.job['log'], e)
-            else:
-                label = self.job['uuid']
+                               "log collection %s: %s", self.process['log'], e)
         if rdr is None:
-            rdr = crunchstat_summary.reader.LiveLogReader(self.job['uuid'])
-            label = self.job['uuid'] + ' (partial)'
-        super(JobSummarizer, self).__init__(rdr, **kwargs)
-        self.label = label
-        self.existing_constraints = self.job.get('runtime_constraints', {})
+            uuid = self.process.get('container_uuid', self.process.get('uuid'))
+            rdr = crunchstat_summary.reader.LiveLogReader(uuid)
+            label = label + ' (partial)'
+        super(ProcessSummarizer, self).__init__(rdr, label=label, **kwargs)
+        self.existing_constraints = self.process.get('runtime_constraints', {})
 
 
-class PipelineSummarizer(object):
-    def __init__(self, pipeline_instance_uuid, **kwargs):
-        arv = arvados.api('v1', model=OrderedJsonModel())
-        instance = arv.pipeline_instances().get(
-            uuid=pipeline_instance_uuid).execute()
-        self.summarizers = collections.OrderedDict()
-        for cname, component in instance['components'].iteritems():
-            if 'job' not in component:
-                logger.warning(
-                    "%s: skipping component with no job assigned", cname)
-            else:
-                logger.info(
-                    "%s: job %s", cname, component['job']['uuid'])
-                summarizer = JobSummarizer(component['job'], **kwargs)
-                summarizer.label = '{} {}'.format(
-                    cname, component['job']['uuid'])
-                self.summarizers[cname] = summarizer
-        self.label = pipeline_instance_uuid
+class JobSummarizer(ProcessSummarizer):
+    runtime_constraint_mem_unit = MB
+    map_runtime_constraint = {
+        'keep_cache_ram': 'keep_cache_mb_per_task',
+        'ram': 'min_ram_mb_per_node',
+        'vcpus': 'min_cores_per_node',
+    }
+
+
+class ContainerRequestSummarizer(ProcessSummarizer):
+    runtime_constraint_mem_unit = 1
+
+
+class MultiSummarizer(object):
+    def __init__(self, children={}, label=None, threads=1, **kwargs):
+        self.throttle = threading.Semaphore(threads)
+        self.children = children
+        self.label = label
+
+    def run_and_release(self, target, *args, **kwargs):
+        try:
+            return target(*args, **kwargs)
+        finally:
+            self.throttle.release()
 
     def run(self):
         threads = []
-        for summarizer in self.summarizers.itervalues():
-            t = threading.Thread(target=summarizer.run)
+        for child in self.children.values():
+            self.throttle.acquire()
+            t = threading.Thread(target=self.run_and_release, args=(child.run, ))
             t.daemon = True
             t.start()
             threads.append(t)
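
(The loop above is a standard semaphore-throttled fan-out. A
self-contained sketch of the same pattern, with a stand-in work()
function:)

import threading

throttle = threading.Semaphore(4)      # at most 4 children run at once

def work(name):
    print('summarizing', name)

def run_and_release(target, *args):
    try:
        return target(*args)
    finally:
        throttle.release()

threads = []
for name in ('child1', 'child2', 'child3'):
    throttle.acquire()                 # blocks while 4 threads are in flight
    t = threading.Thread(target=run_and_release, args=(work, name))
    t.daemon = True
    t.start()
    threads.append(t)
for t in threads:
    t.join()
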
@@ -438,13 +588,120 @@ class PipelineSummarizer(object):
 
     def text_report(self):
         txt = ''
-        for cname, summarizer in self.summarizers.iteritems():
-            txt += '### Summary for {} ({})\n'.format(
-                cname, summarizer.job['uuid'])
-            txt += summarizer.text_report()
+        d = self._descendants()
+        for child in d.values():
+            if len(d) > 1:
+                txt += '### Summary for {} ({})\n'.format(
+                    child.label, child.process['uuid'])
+            txt += child.text_report()
             txt += '\n'
         return txt
 
+    def _descendants(self):
+        """Dict of self and all descendants.
+
+        Nodes with nothing of their own to report (like
+        MultiSummarizers) are omitted.
+        """
+        d = collections.OrderedDict()
+        for key, child in self.children.items():
+            if isinstance(child, Summarizer):
+                d[key] = child
+            if isinstance(child, MultiSummarizer):
+                d.update(child._descendants())
+        return d
+
     def html_report(self):
-        return crunchstat_summary.chartjs.ChartJS(
-            self.label, self.summarizers.itervalues()).html()
+        return WEBCHART_CLASS(self.label, iter(self._descendants().values())).html()
+
+
+class JobTreeSummarizer(MultiSummarizer):
+    """Summarizes a job and all children listed in its components field."""
+    def __init__(self, job, label=None, **kwargs):
+        arv = arvados.api('v1', model=OrderedJsonModel())
+        label = label or job.get('name', job['uuid'])
+        children = collections.OrderedDict()
+        children[job['uuid']] = JobSummarizer(job, label=label, **kwargs)
+        if job.get('components', None):
+            preloaded = {}
+            for j in arv.jobs().index(
+                    limit=len(job['components']),
+                    filters=[['uuid','in',list(job['components'].values())]]).execute()['items']:
+                preloaded[j['uuid']] = j
+            for cname in sorted(job['components'].keys()):
+                child_uuid = job['components'][cname]
+                j = (preloaded.get(child_uuid) or
+                     arv.jobs().get(uuid=child_uuid).execute())
+                children[child_uuid] = JobTreeSummarizer(job=j, label=cname, **kwargs)
+
+        super(JobTreeSummarizer, self).__init__(
+            children=children,
+            label=label,
+            **kwargs)
+
+
+class PipelineSummarizer(MultiSummarizer):
+    def __init__(self, instance, **kwargs):
+        children = collections.OrderedDict()
+        for cname, component in instance['components'].items():
+            if 'job' not in component:
+                logger.warning(
+                    "%s: skipping component with no job assigned", cname)
+            else:
+                logger.info(
+                    "%s: job %s", cname, component['job']['uuid'])
+                summarizer = JobTreeSummarizer(component['job'], label=cname, **kwargs)
+                summarizer.label = '{} {}'.format(
+                    cname, component['job']['uuid'])
+                children[cname] = summarizer
+        super(PipelineSummarizer, self).__init__(
+            children=children,
+            label=instance['uuid'],
+            **kwargs)
+
+
+class ContainerRequestTreeSummarizer(MultiSummarizer):
+    def __init__(self, root, skip_child_jobs=False, **kwargs):
+        arv = arvados.api('v1', model=OrderedJsonModel())
+
+        label = kwargs.pop('label', None) or root.get('name') or root['uuid']
+        root['name'] = label
+
+        children = collections.OrderedDict()
+        todo = collections.deque((root, ))
+        while len(todo) > 0:
+            current = todo.popleft()
+            label = current['name']
+            sort_key = current['created_at']
+
+            summer = ContainerRequestSummarizer(current, label=label, **kwargs)
+            summer.sort_key = sort_key
+            children[current['uuid']] = summer
+
+            page_filters = []
+            while True:
+                child_crs = arv.container_requests().index(
+                    order=['uuid asc'],
+                    filters=page_filters+[
+                        ['requesting_container_uuid', '=', current['container_uuid']]],
+                ).execute()
+                if not child_crs['items']:
+                    break
+                elif skip_child_jobs:
+                    logger.warning('%s: omitting stats from %d child containers'
+                                   ' because --skip-child-jobs flag is on',
+                                   label, child_crs['items_available'])
+                    break
+                page_filters = [['uuid', '>', child_crs['items'][-1]['uuid']]]
+                for cr in child_crs['items']:
+                    if cr['container_uuid']:
+                        logger.debug('%s: container req %s', current['uuid'], cr['uuid'])
+                        cr['name'] = cr.get('name') or cr['uuid']
+                        todo.append(cr)
+        sorted_children = collections.OrderedDict()
+        for uuid in sorted(list(children.keys()), key=lambda uuid: children[uuid].sort_key):
+            sorted_children[uuid] = children[uuid]
+        super(ContainerRequestTreeSummarizer, self).__init__(
+            children=sorted_children,
+            label=root['name'],
+            **kwargs)
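
(The inner while loop above pages with uuid > last-seen instead of
offsets -- keyset pagination, which stays correct even if items are
created while paging. A minimal sketch against a hypothetical
index_api callable:)

def each_page_item(index_api, base_filters):
    """Yield every matching item, paging by uuid (keyset pagination)."""
    page_filters = []
    while True:
        page = index_api(order=['uuid asc'],
                         filters=page_filters + base_filters)
        if not page['items']:
            return
        for item in page['items']:
            yield item
        page_filters = [['uuid', '>', page['items'][-1]['uuid']]]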