Fix test for Python 3 integer division difference.
[arvados.git] tools/crunchstat-summary/crunchstat_summary/summarizer.py
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0

import arvados
import collections
import crunchstat_summary.dygraphs
import crunchstat_summary.reader
import datetime
import functools
import itertools
import math
import re
import sys
import threading
import _strptime

from arvados.api import OrderedJsonModel
from crunchstat_summary import logger

# Recommend memory constraints that are this multiple of an integral
# number of GiB. (Actual nodes tend to be sold in sizes like 8 GiB
# that have amounts like 7.5 GiB according to the kernel.)
AVAILABLE_RAM_RATIO = 0.95
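# E.g. a node sold as "8 GiB" is assumed to have about 8 * 0.95 = 7.6 GiB
# usable, so recommendations are made in whole multiples of 0.95 GiB
# (see Summarizer._recommend_ram below).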


# Workaround datetime.datetime.strptime() thread-safety bug by calling
# it once before starting threads.  https://bugs.python.org/issue7980
datetime.datetime.strptime('1999-12-31_23:59:59', '%Y-%m-%d_%H:%M:%S')


WEBCHART_CLASS = crunchstat_summary.dygraphs.DygraphsChart


class Task(object):
    def __init__(self):
        self.starttime = None
        self.finishtime = None
        self.series = collections.defaultdict(list)


class Summarizer(object):
    def __init__(self, logdata, label=None, skip_child_jobs=False, uuid=None, **kwargs):
        self._logdata = logdata

        self.uuid = uuid
        self.label = label
        self.starttime = None
        self.finishtime = None
        self._skip_child_jobs = skip_child_jobs

        # stats_max: {category: {stat: val}}
        self.stats_max = collections.defaultdict(
            functools.partial(collections.defaultdict, lambda: 0))
        # task_stats: {task_id: {category: {stat: val}}}
        self.task_stats = collections.defaultdict(
            functools.partial(collections.defaultdict, dict))
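        # e.g. after parsing a crunch2 log (hypothetical values):
        #   stats_max['cpu']['user+sys__rate'] == 3.33
        #   task_stats['container']['mem']['rss'] == 7000000000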

        self.seq_to_uuid = {}
        self.tasks = collections.defaultdict(Task)

        # We won't bother recommending new runtime constraints if the
        # constraints given when running the job are known to us and
        # are already suitable.  If applicable, the subclass
        # constructor will overwrite this with something useful.
        self.existing_constraints = {}

        logger.debug("%s: logdata %s", self.label, logdata)

    def run(self):
        logger.debug("%s: parsing logdata %s", self.label, self._logdata)
        with self._logdata as logdata:
            self._run(logdata)

    def _run(self, logdata):
        self.detected_crunch1 = False
        for line in logdata:
            if not self.detected_crunch1 and '-8i9sb-' in line:
                self.detected_crunch1 = True

            if self.detected_crunch1:
                m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) job_task (?P<task_uuid>\S+)$', line)
                if m:
                    seq = int(m.group('seq'))
                    uuid = m.group('task_uuid')
                    self.seq_to_uuid[seq] = uuid
                    logger.debug('%s: seq %d is task %s', self.label, seq, uuid)
                    continue

                m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) (success in|failure \(#., permanent\) after) (?P<elapsed>\d+) seconds', line)
                if m:
                    task_id = self.seq_to_uuid[int(m.group('seq'))]
                    elapsed = int(m.group('elapsed'))
                    self.task_stats[task_id]['time'] = {'elapsed': elapsed}
                    if elapsed > self.stats_max['time']['elapsed']:
                        self.stats_max['time']['elapsed'] = elapsed
                    continue

                m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) stderr Queued job (?P<uuid>\S+)$', line)
                if m:
                    uuid = m.group('uuid')
                    if self._skip_child_jobs:
                        logger.warning('%s: omitting stats from child job %s'
                                       ' because --skip-child-jobs flag is on',
                                       self.label, uuid)
                        continue
                    logger.debug('%s: follow %s', self.label, uuid)
                    child_summarizer = ProcessSummarizer(uuid)
                    child_summarizer.stats_max = self.stats_max
                    child_summarizer.task_stats = self.task_stats
                    child_summarizer.tasks = self.tasks
                    child_summarizer.starttime = self.starttime
                    child_summarizer.run()
                    logger.debug('%s: done %s', self.label, uuid)
                    continue

                # 2017-12-02_17:15:08 e51c5-8i9sb-mfp68stkxnqdd6m 63676 0 stderr crunchstat: keepcalls 0 put 2576 get -- interval 10.0000 seconds 0 put 2576 get
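                # e.g. from the sample line above, the regex below yields:
                #   timestamp='2017-12-02_17:15:08', seq='0',
                #   category='keepcalls', current='0 put 2576 get',
                #   interval='10.0000 seconds 0 put 2576 get'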
                m = re.search(r'^(?P<timestamp>[^\s.]+)(\.\d+)? (?P<job_uuid>\S+) \d+ (?P<seq>\d+) stderr (?P<crunchstat>crunchstat: )(?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n$', line)
                if not m:
                    continue
            else:
                # crunch2
                # 2017-12-01T16:56:24.723509200Z crunchstat: keepcalls 0 put 3 get -- interval 10.0000 seconds 0 put 3 get
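                # e.g. from the sample line above:
                #   timestamp='2017-12-01T16:56:24.723509200Z',
                #   category='keepcalls', current='0 put 3 get',
                #   interval='10.0000 seconds 0 put 3 get'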
                m = re.search(r'^(?P<timestamp>\S+) (?P<crunchstat>crunchstat: )?(?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n$', line)
                if not m:
                    continue

            if self.label is None:
                try:
                    self.label = m.group('job_uuid')
                except IndexError:
                    self.label = 'container'
            if m.group('category').endswith(':'):
                # "stderr crunchstat: notice: ..."
                continue
            elif m.group('category') in ('error', 'caught'):
                continue
            elif m.group('category') in ('read', 'open', 'cgroup', 'CID', 'Running'):
                # "stderr crunchstat: read /proc/1234/net/dev: ..."
                # (old logs are less careful with unprefixed error messages)
                continue

            if self.detected_crunch1:
                task_id = self.seq_to_uuid[int(m.group('seq'))]
            else:
                task_id = 'container'
            task = self.tasks[task_id]

            # Use the first and last crunchstat timestamps as
            # approximations of starttime and finishtime.
            timestamp = m.group('timestamp')
            if timestamp[10:11] == '_':
                timestamp = datetime.datetime.strptime(
                    timestamp, '%Y-%m-%d_%H:%M:%S')
            elif timestamp[10:11] == 'T':
                timestamp = datetime.datetime.strptime(
                    timestamp[:19], '%Y-%m-%dT%H:%M:%S')
            else:
                raise ValueError("Cannot parse timestamp {!r}".format(
                    timestamp))

            if task.starttime is None:
                logger.debug('%s: task %s starttime %s',
                             self.label, task_id, timestamp)
            if task.starttime is None or timestamp < task.starttime:
                task.starttime = timestamp
            if task.finishtime is None or timestamp > task.finishtime:
                task.finishtime = timestamp
            if self.starttime is None or timestamp < self.starttime:
                self.starttime = timestamp
            if self.finishtime is None or timestamp > self.finishtime:
                self.finishtime = timestamp

            if (not self.detected_crunch1) and task.starttime is not None and task.finishtime is not None:
                elapsed = int((task.finishtime - task.starttime).total_seconds())
                self.task_stats[task_id]['time'] = {'elapsed': elapsed}
                if elapsed > self.stats_max['time']['elapsed']:
                    self.stats_max['time']['elapsed'] = elapsed

            this_interval_s = None
            for group in ['current', 'interval']:
                if not m.group(group):
                    continue
                category = m.group('category')
                words = m.group(group).split(' ')
                stats = {}
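                # e.g. m.group('current') == '0 put 2576 get' pairs up as
                # {'put': 0, 'get': 2576}: each value precedes its stat name.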
                try:
                    for val, stat in zip(words[::2], words[1::2]):
                        if '.' in val:
                            stats[stat] = float(val)
                        else:
                            stats[stat] = int(val)
                except ValueError as e:
                    # If the line doesn't start with 'crunchstat:' we
                    # might have mistaken an error message for a
                    # structured crunchstat line.
                    if m.group("crunchstat") is None or m.group("category") == "crunchstat":
                        logger.warning("%s: log contains message\n  %s", self.label, line)
                    else:
                        logger.warning(
                            '%s: Error parsing value %r (stat %r, category %r): %r',
                            self.label, val, stat, category, e)
                        logger.warning('%s', line)
                    continue
                if 'user' in stats or 'sys' in stats:
                    stats['user+sys'] = stats.get('user', 0) + stats.get('sys', 0)
                if 'tx' in stats or 'rx' in stats:
                    stats['tx+rx'] = stats.get('tx', 0) + stats.get('rx', 0)
                for stat, val in stats.items():
                    if group == 'interval':
                        if stat == 'seconds':
                            this_interval_s = val
                            continue
                        elif not (this_interval_s > 0):
                            logger.error(
                                "BUG? interval stat given with duration {!r}".
                                format(this_interval_s))
                            continue
                        else:
                            stat = stat + '__rate'
                            val = val / this_interval_s
                            if stat in ['user+sys__rate', 'tx+rx__rate']:
                                task.series[category, stat].append(
                                    (timestamp - self.starttime, val))
                    else:
                        if stat in ['rss']:
                            task.series[category, stat].append(
                                (timestamp - self.starttime, val))
                        self.task_stats[task_id][category][stat] = val
                    if val > self.stats_max[category][stat]:
                        self.stats_max[category][stat] = val
        logger.debug('%s: done parsing', self.label)

        self.job_tot = collections.defaultdict(
            functools.partial(collections.defaultdict, int))
        for task_id, task_stat in self.task_stats.items():
            for category, stat_last in task_stat.items():
                for stat, val in stat_last.items():
                    if stat in ['cpus', 'cache', 'swap', 'rss']:
                        # meaningless stats like 16 cpu cores x 5 tasks = 80
                        continue
                    self.job_tot[category][stat] += val
        logger.debug('%s: done totals', self.label)

    def long_label(self):
        label = self.label
        if hasattr(self, 'process') and self.process['uuid'] not in label:
            label = '{} ({})'.format(label, self.process['uuid'])
        if self.finishtime:
            label += ' -- elapsed time '
            s = (self.finishtime - self.starttime).total_seconds()
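            # e.g. s == 90061.0 seconds renders as '1d1h1m1s'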
            if s > 86400:
                label += '{}d'.format(int(s/86400))
            if s > 3600:
                label += '{}h'.format(int(s/3600) % 24)
            if s > 60:
                label += '{}m'.format(int(s/60) % 60)
            label += '{}s'.format(int(s) % 60)
        return label

    def text_report(self):
        if not self.tasks:
            return "(no report generated)\n"
        return "\n".join(itertools.chain(
            self._text_report_gen(),
            self._recommend_gen())) + "\n"

    def html_report(self):
        return WEBCHART_CLASS(self.label, [self]).html()

    def _text_report_gen(self):
        yield "\t".join(['category', 'metric', 'task_max', 'task_max_rate', 'job_total'])
        for category, stat_max in sorted(self.stats_max.items()):
            for stat, val in sorted(stat_max.items()):
                if stat.endswith('__rate'):
                    continue
                max_rate = self._format(stat_max.get(stat+'__rate', '-'))
                val = self._format(val)
                tot = self._format(self.job_tot[category].get(stat, '-'))
                yield "\t".join([category, stat, str(val), max_rate, tot])
        for args in (
                ('Number of tasks: {}',
                 len(self.tasks),
                 None),
                ('Max CPU time spent by a single task: {}s',
                 self.stats_max['cpu']['user+sys'],
                 None),
                ('Max CPU usage in a single interval: {}%',
                 self.stats_max['cpu']['user+sys__rate'],
                 lambda x: x * 100),
                ('Overall CPU usage: {}%',
                 float(self.job_tot['cpu']['user+sys']) /
                 self.job_tot['time']['elapsed']
                 if self.job_tot['time']['elapsed'] > 0 else 0,
                 lambda x: x * 100),
                ('Max memory used by a single task: {}GB',
                 self.stats_max['mem']['rss'],
                 lambda x: x / 1e9),
                ('Max network traffic in a single task: {}GB',
                 self.stats_max['net:eth0']['tx+rx'] +
                 self.stats_max['net:keep0']['tx+rx'],
                 lambda x: x / 1e9),
                ('Max network speed in a single interval: {}MB/s',
                 self.stats_max['net:eth0']['tx+rx__rate'] +
                 self.stats_max['net:keep0']['tx+rx__rate'],
                 lambda x: x / 1e6),
                ('Keep cache miss rate {}%',
                 (float(self.job_tot['keepcache']['miss']) /
                 float(self.job_tot['keepcalls']['get']))
                 if self.job_tot['keepcalls']['get'] > 0 else 0,
                 lambda x: x * 100.0),
                ('Keep cache utilization {}%',
                 (float(self.job_tot['blkio:0:0']['read']) /
                 float(self.job_tot['net:keep0']['rx']))
                 if self.job_tot['net:keep0']['rx'] > 0 else 0,
                 lambda x: x * 100.0)):
            format_string, val, transform = args
            if val == float('-Inf'):
                continue
            if transform:
                val = transform(val)
            yield "# "+format_string.format(self._format(val))

    def _recommend_gen(self):
        return itertools.chain(
            self._recommend_cpu(),
            self._recommend_ram(),
            self._recommend_keep_cache())

    def _recommend_cpu(self):
        """Recommend asking for 4 cores if max CPU usage was 333%"""

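        # e.g. a peak of 3.33 CPU-seconds per second is reported as 333%
        # CPU usage, and max(1, ceil(3.33)) == 4 cores is recommended.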
        constraint_key = self._map_runtime_constraint('vcpus')
        cpu_max_rate = self.stats_max['cpu']['user+sys__rate']
        if cpu_max_rate == float('-Inf'):
            logger.warning('%s: no CPU usage data', self.label)
            return
        used_cores = max(1, int(math.ceil(cpu_max_rate)))
        asked_cores = self.existing_constraints.get(constraint_key)
        if asked_cores is None or used_cores < asked_cores:
            yield (
                '#!! {} max CPU usage was {}% -- '
                'try runtime_constraints "{}":{}'
            ).format(
                self.label,
                math.ceil(cpu_max_rate*100),
                constraint_key,
                int(used_cores))

    def _recommend_ram(self):
        """Recommend an economical RAM constraint for this job.

        Nodes that are advertised as "8 gibibytes" actually have what
        we might call "8 nearlygibs" of memory available for jobs.
        Here, we calculate a whole number of nearlygibs that would
        have sufficed to run the job, then recommend requesting a node
        with that number of nearlygibs (expressed as mebibytes).

        Requesting a node with "nearly 8 gibibytes" is our best hope
        of getting a node that actually has nearly 8 gibibytes
        available.  If the node manager is smart enough to account for
        the discrepancy itself when choosing/creating a node, we'll
        get an 8 GiB node with nearly 8 GiB available.  Otherwise, the
        advertised size of the next-size-smaller node (say, 6 GiB)
        will be too low to satisfy our request, so we will effectively
        get rounded up to 8 GiB.

        For example, if we need 7500 MiB, we can ask for 7500 MiB, and
        we will generally get a node that is advertised as "8 GiB" and
        has at least 7500 MiB available.  However, asking for 8192 MiB
        would either result in an unnecessarily expensive 12 GiB node
        (if node manager knows about the discrepancy), or an 8 GiB
        node which has less than 8192 MiB available and is therefore
        considered by crunch-dispatch to be too small to meet our
        constraint.

        When node manager learns how to predict the available memory
        for each node type such that crunch-dispatch always agrees
        that a node is big enough to run the job it was brought up
        for, all this will be unnecessary.  We'll just ask for exactly
        the memory we want -- even if that happens to be 8192 MiB.
        """

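        # Worked example (hypothetical numbers): a task peaking at 7000 MiB
        # RSS needs ceil(7000 / 0.95 / 1024) == 8 nearlygibs, so the yield
        # below recommends 8 * 0.95 * 1024 == 7782 MiB (scaled to the
        # constraint's memory unit), which fits a node sold as "8 GiB".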
        constraint_key = self._map_runtime_constraint('ram')
        used_bytes = self.stats_max['mem']['rss']
        if used_bytes == float('-Inf'):
            logger.warning('%s: no memory usage data', self.label)
            return
        used_mib = math.ceil(float(used_bytes) / 1048576)
        asked_mib = self.existing_constraints.get(constraint_key)

        nearlygibs = lambda mebibytes: mebibytes/AVAILABLE_RAM_RATIO/1024
        if asked_mib is None or (
                math.ceil(nearlygibs(used_mib)) < nearlygibs(asked_mib)):
            yield (
                '#!! {} max RSS was {} MiB -- '
                'try runtime_constraints "{}":{}'
            ).format(
                self.label,
                int(used_mib),
                constraint_key,
                int(math.ceil(nearlygibs(used_mib))*AVAILABLE_RAM_RATIO*1024*(2**20)/self._runtime_constraint_mem_unit()))

    def _recommend_keep_cache(self):
        """Recommend increasing keep cache if utilization < 80%"""
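        # e.g. with the default 256 MiB cache and utilization under 80%,
        # the suggestion below is to double the cache (256 -> 512 MiB,
        # scaled to the constraint's memory unit).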
        constraint_key = self._map_runtime_constraint('keep_cache_ram')
        if self.job_tot['net:keep0']['rx'] == 0:
            return
        utilization = (float(self.job_tot['blkio:0:0']['read']) /
                       float(self.job_tot['net:keep0']['rx']))
        asked_mib = self.existing_constraints.get(constraint_key, 256)

        if utilization < 0.8:
            yield (
                '#!! {} Keep cache utilization was {:.2f}% -- '
                'try runtime_constraints "{}":{} (or more)'
            ).format(
                self.label,
                utilization * 100.0,
                constraint_key,
                asked_mib*2*(2**20)//self._runtime_constraint_mem_unit())


    def _format(self, val):
        """Return a string representation of a stat.

        {:.2f} for floats, default format for everything else."""
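        # e.g. self._format(0.3333) == '0.33' and self._format(42) == '42'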
        if isinstance(val, float):
            return '{:.2f}'.format(val)
        else:
            return '{}'.format(val)

    def _runtime_constraint_mem_unit(self):
        if hasattr(self, 'runtime_constraint_mem_unit'):
            return self.runtime_constraint_mem_unit
        elif self.detected_crunch1:
            return JobSummarizer.runtime_constraint_mem_unit
        else:
            return ContainerSummarizer.runtime_constraint_mem_unit

    def _map_runtime_constraint(self, key):
        if hasattr(self, 'map_runtime_constraint'):
            return self.map_runtime_constraint[key]
        elif self.detected_crunch1:
            return JobSummarizer.map_runtime_constraint[key]
        else:
            return key


class CollectionSummarizer(Summarizer):
    def __init__(self, collection_id, **kwargs):
        super(CollectionSummarizer, self).__init__(
            crunchstat_summary.reader.CollectionReader(collection_id), **kwargs)
        self.label = collection_id


def NewSummarizer(process_or_uuid, **kwargs):
    """Construct with the appropriate subclass for this uuid/object."""

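    # e.g. '-dz642-' (container) and '-xvhdp-' (container request) uuids
    # get a ContainerTreeSummarizer, '-8i9sb-' (job) a JobTreeSummarizer,
    # '-d1hrv-' (pipeline instance) a PipelineSummarizer, and '-4zz18-'
    # (collection) a CollectionSummarizer.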
    if isinstance(process_or_uuid, dict):
        process = process_or_uuid
        uuid = process['uuid']
    else:
        uuid = process_or_uuid
        process = None
        arv = arvados.api('v1', model=OrderedJsonModel())

    if '-dz642-' in uuid:
        if process is None:
            process = arv.containers().get(uuid=uuid).execute()
        klass = ContainerTreeSummarizer
    elif '-xvhdp-' in uuid:
        if process is None:
            process = arv.container_requests().get(uuid=uuid).execute()
        klass = ContainerTreeSummarizer
    elif '-8i9sb-' in uuid:
        if process is None:
            process = arv.jobs().get(uuid=uuid).execute()
        klass = JobTreeSummarizer
    elif '-d1hrv-' in uuid:
        if process is None:
            process = arv.pipeline_instances().get(uuid=uuid).execute()
        klass = PipelineSummarizer
    elif '-4zz18-' in uuid:
        return CollectionSummarizer(collection_id=uuid)
    else:
        raise ValueError("Unrecognized uuid %s" % uuid)
    return klass(process, uuid=uuid, **kwargs)


class ProcessSummarizer(Summarizer):
    """Process is a job, pipeline, container, or container request."""

    def __init__(self, process, label=None, **kwargs):
        rdr = None
        self.process = process
        if label is None:
            label = self.process.get('name', self.process['uuid'])
        if self.process.get('log'):
            try:
                rdr = crunchstat_summary.reader.CollectionReader(self.process['log'])
            except arvados.errors.NotFoundError as e:
                logger.warning("Trying event logs after failing to read "
                               "log collection %s: %s", self.process['log'], e)
        if rdr is None:
            rdr = crunchstat_summary.reader.LiveLogReader(self.process['uuid'])
            label = label + ' (partial)'
        super(ProcessSummarizer, self).__init__(rdr, label=label, **kwargs)
        self.existing_constraints = self.process.get('runtime_constraints', {})


class JobSummarizer(ProcessSummarizer):
    runtime_constraint_mem_unit = 1048576
    map_runtime_constraint = {
        'keep_cache_ram': 'keep_cache_mb_per_task',
        'ram': 'min_ram_mb_per_node',
        'vcpus': 'min_cores_per_node',
    }


class ContainerSummarizer(ProcessSummarizer):
    runtime_constraint_mem_unit = 1


class MultiSummarizer(object):
    def __init__(self, children={}, label=None, threads=1, **kwargs):
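        # The semaphore bounds how many children parse their logs at
        # once; each worker thread releases it in run_and_release().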
        self.throttle = threading.Semaphore(threads)
        self.children = children
        self.label = label

    def run_and_release(self, target, *args, **kwargs):
        try:
            return target(*args, **kwargs)
        finally:
            self.throttle.release()

    def run(self):
        threads = []
        for child in self.children.values():
            self.throttle.acquire()
            t = threading.Thread(target=self.run_and_release, args=(child.run, ))
            t.daemon = True
            t.start()
            threads.append(t)
        for t in threads:
            t.join()

    def text_report(self):
        txt = ''
        d = self._descendants()
        for child in d.values():
            if len(d) > 1:
                txt += '### Summary for {} ({})\n'.format(
                    child.label, child.process['uuid'])
            txt += child.text_report()
            txt += '\n'
        return txt

    def _descendants(self):
        """Dict of self and all descendants.

        Nodes with nothing of their own to report (like
        MultiSummarizers) are omitted.
        """
        d = collections.OrderedDict()
        for key, child in self.children.items():
            if isinstance(child, Summarizer):
                d[key] = child
            if isinstance(child, MultiSummarizer):
                d.update(child._descendants())
        return d

    def html_report(self):
        return WEBCHART_CLASS(self.label, iter(self._descendants().values())).html()


class JobTreeSummarizer(MultiSummarizer):
    """Summarizes a job and all children listed in its components field."""
    def __init__(self, job, label=None, **kwargs):
        arv = arvados.api('v1', model=OrderedJsonModel())
        label = label or job.get('name', job['uuid'])
        children = collections.OrderedDict()
        children[job['uuid']] = JobSummarizer(job, label=label, **kwargs)
        if job.get('components', None):
            preloaded = {}
            for j in arv.jobs().index(
                    limit=len(job['components']),
                    filters=[['uuid','in',list(job['components'].values())]]).execute()['items']:
                preloaded[j['uuid']] = j
            for cname in sorted(job['components'].keys()):
                child_uuid = job['components'][cname]
                j = (preloaded.get(child_uuid) or
                     arv.jobs().get(uuid=child_uuid).execute())
                children[child_uuid] = JobTreeSummarizer(job=j, label=cname, **kwargs)

        super(JobTreeSummarizer, self).__init__(
            children=children,
            label=label,
            **kwargs)


class PipelineSummarizer(MultiSummarizer):
    def __init__(self, instance, **kwargs):
        children = collections.OrderedDict()
        for cname, component in instance['components'].items():
            if 'job' not in component:
                logger.warning(
                    "%s: skipping component with no job assigned", cname)
            else:
                logger.info(
                    "%s: job %s", cname, component['job']['uuid'])
                summarizer = JobTreeSummarizer(component['job'], label=cname, **kwargs)
                summarizer.label = '{} {}'.format(
                    cname, component['job']['uuid'])
                children[cname] = summarizer
        super(PipelineSummarizer, self).__init__(
            children=children,
            label=instance['uuid'],
            **kwargs)


class ContainerTreeSummarizer(MultiSummarizer):
    def __init__(self, root, skip_child_jobs=False, **kwargs):
        arv = arvados.api('v1', model=OrderedJsonModel())

        label = kwargs.pop('label', None) or root.get('name') or root['uuid']
        root['name'] = label

        children = collections.OrderedDict()
        todo = collections.deque((root, ))
        while len(todo) > 0:
            current = todo.popleft()
            label = current['name']
            sort_key = current['created_at']
            if current['uuid'].find('-xvhdp-') > 0:
                current = arv.containers().get(uuid=current['container_uuid']).execute()

            summer = ContainerSummarizer(current, label=label, **kwargs)
            summer.sort_key = sort_key
            children[current['uuid']] = summer

            page_filters = []
            while True:
                child_crs = arv.container_requests().index(
                    order=['uuid asc'],
                    filters=page_filters+[
                        ['requesting_container_uuid', '=', current['uuid']]],
                ).execute()
                if not child_crs['items']:
                    break
                elif skip_child_jobs:
                    logger.warning('%s: omitting stats from %d child containers'
                                   ' because --skip-child-jobs flag is on',
                                   label, child_crs['items_available'])
                    break
                page_filters = [['uuid', '>', child_crs['items'][-1]['uuid']]]
                for cr in child_crs['items']:
                    if cr['container_uuid']:
                        logger.debug('%s: container req %s', current['uuid'], cr['uuid'])
                        cr['name'] = cr.get('name') or cr['uuid']
                        todo.append(cr)
        sorted_children = collections.OrderedDict()
        for uuid in sorted(list(children.keys()), key=lambda uuid: children[uuid].sort_key):
            sorted_children[uuid] = children[uuid]
        super(ContainerTreeSummarizer, self).__init__(
            children=sorted_children,
            label=root['name'],
            **kwargs)