Merge branch '8784-dir-listings'
[arvados.git] / tools / crunchstat-summary / crunchstat_summary / summarizer.py
index 65160184d6a48739df7c3ecf552483bb58310ad0..9b8410e9aafcb9a6b22fbe8aa21a0aa4bb0a27d7 100644 (file)
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
 from __future__ import print_function
 
 import arvados
-import gzip
+import collections
+import crunchstat_summary.chartjs
+import crunchstat_summary.reader
+import datetime
+import functools
+import itertools
+import math
 import re
 import sys
+import threading
+import _strptime
+
+from arvados.api import OrderedJsonModel
+from crunchstat_summary import logger
+
+# Recommend memory constraints that are this multiple of an integral
+# number of GiB. (Actual nodes tend to be sold in sizes like 8 GiB,
+# of which the kernel reports only about 7.5 GiB as available.)
+AVAILABLE_RAM_RATIO = 0.95
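+# For example, an "8 GiB" node is treated as having 8 * 0.95 = 7.6 GiB
+# actually available for a job's memory constraint.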
+
+
+# Work around a datetime.datetime.strptime() thread-safety bug by
+# calling it once before starting threads.  https://bugs.python.org/issue7980
+datetime.datetime.strptime('1999-12-31_23:59:59', '%Y-%m-%d_%H:%M:%S')
+
+
+class Task(object):
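+    """Per-task state: start/finish times and raw data series.
+
+    series maps (category, stat) keys to lists of (elapsed time, value)
+    samples, which the chart report plots over time.
+    """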
+    def __init__(self):
+        self.starttime = None
+        self.series = collections.defaultdict(list)
 
 
 class Summarizer(object):
-    def __init__(self, args):
-        self.args = args
+    def __init__(self, logdata, label=None, skip_child_jobs=False):
+        self._logdata = logdata
+
+        self.label = label
+        self.starttime = None
+        self.finishtime = None
+        self._skip_child_jobs = skip_child_jobs
+
+        # stats_max: {category: {stat: val}}
+        self.stats_max = collections.defaultdict(
+            functools.partial(collections.defaultdict, lambda: 0))
+        # task_stats: {task_id: {category: {stat: val}}}
+        self.task_stats = collections.defaultdict(
+            functools.partial(collections.defaultdict, dict))
+
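+        # seq_to_uuid: {task sequence number: task UUID}, learned
+        # from "job_task" log lines as they are parsed.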
+        self.seq_to_uuid = {}
+        self.tasks = collections.defaultdict(Task)
+
+        # We won't bother recommending new runtime constraints if the
+        # constraints given when running the job are known to us and
+        # are already suitable.  If applicable, the subclass
+        # constructor will overwrite this with something useful.
+        self.existing_constraints = {}
+
+        logger.debug("%s: logdata %s", self.label, logdata)
 
     def run(self):
-        stats_max = {}
-        for line in self._logdata():
-            m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) stderr crunchstat: (?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n', line)
-            if not m:
+        logger.debug("%s: parsing logdata %s", self.label, self._logdata)
+        for line in self._logdata:
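+            # A "job_task" line maps a task sequence number to its UUID.
+            # An illustrative example (UUIDs are placeholders):
+            # "2016-03-01_01:02:03 zzzzz-8i9sb-xxxxxxxxxxxxxxx 12345 1 job_task zzzzz-ot0gb-xxxxxxxxxxxxxxx"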
+            m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) job_task (?P<task_uuid>\S+)$', line)
+            if m:
+                seq = int(m.group('seq'))
+                uuid = m.group('task_uuid')
+                self.seq_to_uuid[seq] = uuid
+                logger.debug('%s: seq %d is task %s', self.label, seq, uuid)
                 continue
-            if m.group('category').endswith(':'):
-                # "notice:" etc.
+
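+            # A task exit line reports the task's elapsed time, e.g.:
+            # "2016-03-01_01:04:05 zzzzz-8i9sb-xxxxxxxxxxxxxxx 12345 1 success in 123 seconds"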
+            m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) (success in|failure \(#., permanent\) after) (?P<elapsed>\d+) seconds', line)
+            if m:
+                task_id = self.seq_to_uuid[int(m.group('seq'))]
+                elapsed = int(m.group('elapsed'))
+                self.task_stats[task_id]['time'] = {'elapsed': elapsed}
+                if elapsed > self.stats_max['time']['elapsed']:
+                    self.stats_max['time']['elapsed'] = elapsed
                 continue
-            this_interval_s = None
-            for group in ['current', 'interval']:
-                if not m.group(group):
+
+            m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) stderr Queued job (?P<uuid>\S+)$', line)
+            if m:
+                uuid = m.group('uuid')
+                if self._skip_child_jobs:
+                    logger.warning('%s: omitting stats from child job %s'
+                                   ' because --skip-child-jobs flag is on',
+                                   self.label, uuid)
                     continue
-                category = m.group('category')
-                if category not in stats_max:
-                    stats_max[category] = {}
-                words = m.group(group).split(' ')
-                for val, stat in zip(words[::2], words[1::2]):
-                    if '.' in val:
-                        val = float(val)
-                    else:
-                        val = int(val)
-                    if group == 'interval':
-                        if stat == 'seconds':
-                            this_interval_s = val
-                            continue
-                        elif not (this_interval_s > 0):
-                            print("BUG? interval stat given with duration {!r}".
-                                  format(this_interval_s),
-                                  file=sys.stderr)
-                            continue
+                logger.debug('%s: follow %s', self.label, uuid)
+                child_summarizer = JobSummarizer(uuid)
+                child_summarizer.stats_max = self.stats_max
+                child_summarizer.task_stats = self.task_stats
+                child_summarizer.tasks = self.tasks
+                child_summarizer.starttime = self.starttime
+                child_summarizer.run()
+                logger.debug('%s: done %s', self.label, uuid)
+                continue
+
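+            # A crunchstat line reports cumulative stats for one category,
+            # optionally followed by deltas for the last interval, e.g.:
+            # "2016-03-01_01:02:13 zzzzz-8i9sb-xxxxxxxxxxxxxxx 12345 1 stderr crunchstat: cpu 1234.5600 user 59.0000 sys 8 cpus -- interval 10.0000 seconds 12.3400 user 0.5900 sys"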
+            m = re.search(r'^(?P<timestamp>[^\s.]+)(\.\d+)? (?P<job_uuid>\S+) \d+ (?P<seq>\d+) stderr crunchstat: (?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n', line)
+            if not m:
+                continue
+
+            try:
+                if self.label is None:
+                    self.label = m.group('job_uuid')
+                    logger.debug('%s: using job uuid as label', self.label)
+                if m.group('category').endswith(':'):
+                    # "stderr crunchstat: notice: ..."
+                    continue
+                elif m.group('category') in ('error', 'caught'):
+                    continue
+                elif m.group('category') == 'read':
+                    # "stderr crunchstat: read /proc/1234/net/dev: ..."
+                    # (crunchstat formatting fixed, but old logs still say this)
+                    continue
+                task_id = self.seq_to_uuid[int(m.group('seq'))]
+                task = self.tasks[task_id]
+
+                # Use the first and last crunchstat timestamps as
+                # approximations of starttime and finishtime.
+                timestamp = datetime.datetime.strptime(
+                    m.group('timestamp'), '%Y-%m-%d_%H:%M:%S')
+                if not task.starttime:
+                    task.starttime = timestamp
+                    logger.debug('%s: task %s starttime %s',
+                                 self.label, task_id, timestamp)
+                task.finishtime = timestamp
+
+                if not self.starttime:
+                    self.starttime = timestamp
+                self.finishtime = timestamp
+
+                this_interval_s = None
+                for group in ['current', 'interval']:
+                    if not m.group(group):
+                        continue
+                    category = m.group('category')
+                    words = m.group(group).split(' ')
+                    stats = {}
+                    for val, stat in zip(words[::2], words[1::2]):
+                        try:
+                            if '.' in val:
+                                stats[stat] = float(val)
+                            else:
+                                stats[stat] = int(val)
+                        except ValueError as e:
+                            raise ValueError(
+                                'Error parsing {} stat: {!r}'.format(
+                                    stat, e))
+                    if 'user' in stats or 'sys' in stats:
+                        stats['user+sys'] = stats.get('user', 0) + stats.get('sys', 0)
+                    if 'tx' in stats or 'rx' in stats:
+                        stats['tx+rx'] = stats.get('tx', 0) + stats.get('rx', 0)
+                    for stat, val in stats.iteritems():
+                        if group == 'interval':
+                            if stat == 'seconds':
+                                this_interval_s = val
+                                continue
+                            elif not (this_interval_s > 0):
+                                logger.error(
+                                    "BUG? interval stat given with duration {!r}".
+                                    format(this_interval_s))
+                                continue
+                            else:
+                                stat = stat + '__rate'
+                                val = val / this_interval_s
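+                                # e.g. 40.0 user+sys CPU-seconds over a
+                                # 10-second interval -> rate 4.0
+                                # (reported as 400% CPU)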
+                                if stat in ['user+sys__rate', 'tx+rx__rate']:
+                                    task.series[category, stat].append(
+                                        (timestamp - self.starttime, val))
                         else:
-                            stat = stat + '__rate'
-                            val = val / this_interval_s
-                    if val > stats_max[category].get(stat, float('-Inf')):
-                        stats_max[category][stat] = val
-        self.stats_max = stats_max
-
-    def report(self):
-        return "\n".join(self._report_gen()) + "\n"
-
-    def _report_gen(self):
-        yield "\t".join(['category', 'metric', 'max', 'max_rate'])
-        for category, stat_max in self.stats_max.iteritems():
-            for stat, val in stat_max.iteritems():
+                            if stat in ['rss']:
+                                task.series[category, stat].append(
+                                    (timestamp - self.starttime, val))
+                            self.task_stats[task_id][category][stat] = val
+                        if val > self.stats_max[category][stat]:
+                            self.stats_max[category][stat] = val
+            except Exception as e:
+                logger.info('Skipping malformed line: {!r} (error: {})'.format(line, e))
+        logger.debug('%s: done parsing', self.label)
+
+        self.job_tot = collections.defaultdict(
+            functools.partial(collections.defaultdict, int))
+        for task_id, task_stat in self.task_stats.iteritems():
+            for category, stat_last in task_stat.iteritems():
+                for stat, val in stat_last.iteritems():
+                    if stat in ['cpus', 'cache', 'swap', 'rss']:
+                        # Skip stats that are meaningless when summed
+                        # across tasks (e.g., 16 CPU cores x 5 tasks = 80).
+                        continue
+                    self.job_tot[category][stat] += val
+        logger.debug('%s: done totals', self.label)
+
+    def long_label(self):
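+        # e.g. "foo -- elapsed time 1d1h1m1s" for a 90061-second run.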
+        label = self.label
+        if self.finishtime:
+            label += ' -- elapsed time '
+            s = (self.finishtime - self.starttime).total_seconds()
+            if s > 86400:
+                label += '{}d'.format(int(s/86400))
+            if s > 3600:
+                label += '{}h'.format(int(s/3600) % 24)
+            if s > 60:
+                label += '{}m'.format(int(s/60) % 60)
+            label += '{}s'.format(int(s) % 60)
+        return label
+
+    def text_report(self):
+        if not self.tasks:
+            return "(no report generated)\n"
+        return "\n".join(itertools.chain(
+            self._text_report_gen(),
+            self._recommend_gen())) + "\n"
+
+    def html_report(self):
+        return crunchstat_summary.chartjs.ChartJS(self.label, [self]).html()
+
+    def _text_report_gen(self):
+        yield "\t".join(['category', 'metric', 'task_max', 'task_max_rate', 'job_total'])
+        for category, stat_max in sorted(self.stats_max.iteritems()):
+            for stat, val in sorted(stat_max.iteritems()):
                 if stat.endswith('__rate'):
                     continue
-                if stat+'__rate' in stat_max:
-                    max_rate = '{:.2f}'.format(stat_max[stat+'__rate'])
-                else:
-                    max_rate = '-'
-                if isinstance(val, float):
-                    val = '{:.2f}'.format(val)
-                yield "\t".join([category, stat, str(val), max_rate])
-
-    def _logdata(self):
-        if self.args.log_file:
-            if self.args.log_file.endswith('.gz'):
-                return gzip.open(self.args.log_file)
-            else:
-                return open(self.args.log_file)
-        elif self.args.job:
-            arv = arvados.api('v1')
-            job = arv.jobs().get(uuid=self.args.job).execute()
-            if not job['log']:
-                raise ValueError(
-                    "job {} has no log; live summary not implemented".format(
-                        self.args.job))
-            collection = arvados.collection.CollectionReader(job['log'])
-            filenames = [filename for filename in collection]
-            if len(filenames) != 1:
-                raise ValueError(
-                    "collection {} has {} files; need exactly one".format(
-                        job.log, len(filenames)))
-            return collection.open(filenames[0])
+                max_rate = self._format(stat_max.get(stat+'__rate', '-'))
+                val = self._format(val)
+                tot = self._format(self.job_tot[category].get(stat, '-'))
+                yield "\t".join([category, stat, str(val), max_rate, tot])
+        for args in (
+                ('Number of tasks: {}',
+                 len(self.tasks),
+                 None),
+                ('Max CPU time spent by a single task: {}s',
+                 self.stats_max['cpu']['user+sys'],
+                 None),
+                ('Max CPU usage in a single interval: {}%',
+                 self.stats_max['cpu']['user+sys__rate'],
+                 lambda x: x * 100),
+                ('Overall CPU usage: {}%',
+                 self.job_tot['cpu']['user+sys'] /
+                 self.job_tot['time']['elapsed']
+                 if self.job_tot['time']['elapsed'] > 0 else 0,
+                 lambda x: x * 100),
+                ('Max memory used by a single task: {}GB',
+                 self.stats_max['mem']['rss'],
+                 lambda x: x / 1e9),
+                ('Max network traffic in a single task: {}GB',
+                 self.stats_max['net:eth0']['tx+rx'] +
+                 self.stats_max['net:keep0']['tx+rx'],
+                 lambda x: x / 1e9),
+                ('Max network speed in a single interval: {}MB/s',
+                 self.stats_max['net:eth0']['tx+rx__rate'] +
+                 self.stats_max['net:keep0']['tx+rx__rate'],
+                 lambda x: x / 1e6),
+                ('Keep cache miss rate: {}%',
+                 (float(self.job_tot['keepcache']['miss']) /
+                 float(self.job_tot['keepcalls']['get']))
+                 if self.job_tot['keepcalls']['get'] > 0 else 0,
+                 lambda x: x * 100.0),
+                ('Keep cache utilization: {}%',
+                 (float(self.job_tot['blkio:0:0']['read']) /
+                 float(self.job_tot['net:keep0']['rx']))
+                 if self.job_tot['net:keep0']['rx'] > 0 else 0,
+                 lambda x: x * 100.0)):
+            format_string, val, transform = args
+            if val == float('-Inf'):
+                continue
+            if transform:
+                val = transform(val)
+            yield "# "+format_string.format(self._format(val))
+
+    def _recommend_gen(self):
+        return itertools.chain(
+            self._recommend_cpu(),
+            self._recommend_ram(),
+            self._recommend_keep_cache())
+
+    def _recommend_cpu(self):
+        """Recommend asking for 4 cores if max CPU usage was 333%"""
+
+        cpu_max_rate = self.stats_max['cpu']['user+sys__rate']
+        if cpu_max_rate == float('-Inf'):
+            logger.warning('%s: no CPU usage data', self.label)
+            return
+        used_cores = max(1, int(math.ceil(cpu_max_rate)))
+        asked_cores = self.existing_constraints.get('min_cores_per_node')
+        if asked_cores is None or used_cores < asked_cores:
+            yield (
+                '#!! {} max CPU usage was {}% -- '
+                'try runtime_constraints "min_cores_per_node":{}'
+            ).format(
+                self.label,
+                int(math.ceil(cpu_max_rate*100)),
+                int(used_cores))
+
+    def _recommend_ram(self):
+        """Recommend an economical RAM constraint for this job.
+
+        Nodes that are advertised as "8 gibibytes" actually have what
+        we might call "8 nearlygibs" of memory available for jobs.
+        Here, we calculate a whole number of nearlygibs that would
+        have sufficed to run the job, then recommend requesting a node
+        with that number of nearlygibs (expressed as mebibytes).
+
+        Requesting a node with "nearly 8 gibibytes" is our best hope
+        of getting a node that actually has nearly 8 gibibytes
+        available.  If the node manager is smart enough to account for
+        the discrepancy itself when choosing/creating a node, we'll
+        get an 8 GiB node with nearly 8 GiB available.  Otherwise, the
+        advertised size of the next-size-smaller node (say, 6 GiB)
+        will be too low to satisfy our request, so we will effectively
+        get rounded up to 8 GiB.
+
+        For example, if we need 7500 MiB, we can ask for 7500 MiB, and
+        we will generally get a node that is advertised as "8 GiB" and
+        has at least 7500 MiB available.  However, asking for 8192 MiB
+        would either result in an unnecessarily expensive 12 GiB node
+        (if node manager knows about the discrepancy), or an 8 GiB
+        node which has less than 8192 MiB available and is therefore
+        considered by crunch-dispatch to be too small to meet our
+        constraint.
+
+        When node manager learns how to predict the available memory
+        for each node type such that crunch-dispatch always agrees
+        that a node is big enough to run the job it was brought up
+        for, all this will be unnecessary.  We'll just ask for exactly
+        the memory we want -- even if that happens to be 8192 MiB.
+        """
+
+        used_bytes = self.stats_max['mem']['rss']
+        if used_bytes == float('-Inf'):
+            logger.warning('%s: no memory usage data', self.label)
+            return
+        used_mib = math.ceil(float(used_bytes) / 1048576)
+        asked_mib = self.existing_constraints.get('min_ram_mb_per_node')
+
+        nearlygibs = lambda mebibytes: mebibytes/AVAILABLE_RAM_RATIO/1024
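+        # e.g. used_mib=7000: nearlygibs(7000) ~ 7.2, so 8 nearlygibs
+        # suffice, and we recommend int(8 * 0.95 * 1024) = 7782 MiB.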
+        if asked_mib is None or (
+                math.ceil(nearlygibs(used_mib)) < nearlygibs(asked_mib)):
+            yield (
+                '#!! {} max RSS was {} MiB -- '
+                'try runtime_constraints "min_ram_mb_per_node":{}'
+            ).format(
+                self.label,
+                int(used_mib),
+                int(math.ceil(nearlygibs(used_mib))*AVAILABLE_RAM_RATIO*1024))
+
+    def _recommend_keep_cache(self):
+        """Recommend increasing keep cache if utilization < 80%"""
+        if self.job_tot['net:keep0']['rx'] == 0:
+            return
+        utilization = (float(self.job_tot['blkio:0:0']['read']) /
+                       float(self.job_tot['net:keep0']['rx']))
+        asked_mib = self.existing_constraints.get('keep_cache_mb_per_task', 256)
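+        # e.g. with the default 256 MiB cache at 60% utilization,
+        # recommend 512 MiB.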
+
+        if utilization < 0.8:
+            yield (
+                '#!! {} Keep cache utilization was {:.2f}% -- '
+                'try runtime_constraints "keep_cache_mb_per_task":{} (or more)'
+            ).format(
+                self.label,
+                utilization * 100.0,
+                asked_mib*2)
+
+    def _format(self, val):
+        """Return a string representation of a stat.
+
+        {:.2f} for floats, default format for everything else."""
+        if isinstance(val, float):
+            return '{:.2f}'.format(val)
+        else:
+            return '{}'.format(val)
+
+
+class CollectionSummarizer(Summarizer):
+    def __init__(self, collection_id, **kwargs):
+        super(CollectionSummarizer, self).__init__(
+            crunchstat_summary.reader.CollectionReader(collection_id), **kwargs)
+        self.label = collection_id
+
+
+class JobSummarizer(Summarizer):
+    def __init__(self, job, **kwargs):
+        arv = arvados.api('v1')
+        if isinstance(job, basestring):
+            self.job = arv.jobs().get(uuid=job).execute()
         else:
-            return sys.stdin
+            self.job = job
+        rdr = None
+        if self.job.get('log'):
+            try:
+                rdr = crunchstat_summary.reader.CollectionReader(self.job['log'])
+            except arvados.errors.NotFoundError as e:
+                logger.warning("Trying event logs after failing to read "
+                               "log collection %s: %s", self.job['log'], e)
+            else:
+                label = self.job['uuid']
+        if rdr is None:
+            rdr = crunchstat_summary.reader.LiveLogReader(self.job['uuid'])
+            label = self.job['uuid'] + ' (partial)'
+        super(JobSummarizer, self).__init__(rdr, **kwargs)
+        self.label = label
+        self.existing_constraints = self.job.get('runtime_constraints', {})
+
+
+class PipelineSummarizer(object):
+    def __init__(self, pipeline_instance_uuid, **kwargs):
+        arv = arvados.api('v1', model=OrderedJsonModel())
+        instance = arv.pipeline_instances().get(
+            uuid=pipeline_instance_uuid).execute()
+        self.summarizers = collections.OrderedDict()
+        for cname, component in instance['components'].iteritems():
+            if 'job' not in component:
+                logger.warning(
+                    "%s: skipping component with no job assigned", cname)
+            else:
+                logger.info(
+                    "%s: job %s", cname, component['job']['uuid'])
+                summarizer = JobSummarizer(component['job'], **kwargs)
+                summarizer.label = '{} {}'.format(
+                    cname, component['job']['uuid'])
+                self.summarizers[cname] = summarizer
+        self.label = pipeline_instance_uuid
+
+    def run(self):
+        threads = []
+        for summarizer in self.summarizers.itervalues():
+            t = threading.Thread(target=summarizer.run)
+            t.daemon = True
+            t.start()
+            threads.append(t)
+        for t in threads:
+            t.join()
+
+    def text_report(self):
+        txt = ''
+        for cname, summarizer in self.summarizers.iteritems():
+            txt += '### Summary for {} ({})\n'.format(
+                cname, summarizer.job['uuid'])
+            txt += summarizer.text_report()
+            txt += '\n'
+        return txt
+
+    def html_report(self):
+        return crunchstat_summary.chartjs.ChartJS(
+            self.label, self.summarizers.itervalues()).html()
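+
+
+# Minimal usage sketch (illustrative; assumes a reachable Arvados API
+# and a real job UUID):
+#
+#     summarizer = JobSummarizer('zzzzz-8i9sb-xxxxxxxxxxxxxxx')
+#     summarizer.run()
+#     print(summarizer.text_report())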