def NewSummarizer(process_or_uuid, **kwargs):
    """Construct with the appropriate subclass for this uuid/object.

    Accepts either a process record (a dict with a 'uuid' key) or a bare
    uuid string; in the latter case the record is fetched from the API.
    The object type is recognized by the infix of its uuid.
    """
    if isinstance(process_or_uuid, dict):
        process = process_or_uuid
        uuid = process['uuid']
    else:
        uuid = process_or_uuid
        process = None
    arv = arvados.api('v1', model=OrderedJsonModel())

    if '-dz642-' in uuid:            # container
        if process is None:
            process = arv.containers().get(uuid=uuid).execute()
        klass = ContainerTreeSummarizer
    elif '-xvhdp-' in uuid:          # container request
        if process is None:
            process = arv.container_requests().get(uuid=uuid).execute()
        klass = ContainerTreeSummarizer
    elif '-8i9sb-' in uuid:          # job
        if process is None:
            process = arv.jobs().get(uuid=uuid).execute()
        klass = JobTreeSummarizer
    elif '-d1hrv-' in uuid:          # pipeline instance
        if process is None:
            process = arv.pipeline_instances().get(uuid=uuid).execute()
        klass = PipelineSummarizer
    elif '-4zz18-' in uuid:          # collection: no process record to load
        return CollectionSummarizer(collection_id=uuid)
    else:
        # Format the message eagerly: unlike logging calls, exception
        # constructors do not apply printf-style arguments, so the old
        # two-argument form never interpolated the uuid.
        raise ArgumentError("Unrecognized uuid %s" % uuid)
    return klass(process, uuid=uuid, **kwargs)
+
+
class ProcessSummarizer(Summarizer):
    """Process is a job, pipeline, container, or container request."""

    def __init__(self, process, label=None, **kwargs):
        self.process = process
        if label is None:
            label = self.process.get('name', self.process['uuid'])
        # Prefer the saved log collection; if it is missing (or the process
        # has no log yet), fall back to reading the live event logs.
        rdr = None
        log_id = self.process.get('log')
        if log_id:
            try:
                rdr = crunchstat_summary.reader.CollectionReader(log_id)
            except arvados.errors.NotFoundError as e:
                logger.warning("Trying event logs after failing to read "
                               "log collection %s: %s", log_id, e)
        if rdr is None:
            rdr = crunchstat_summary.reader.LiveLogReader(self.process['uuid'])
            label = label + ' (partial)'
        super(ProcessSummarizer, self).__init__(rdr, label=label, **kwargs)
        self.existing_constraints = self.process.get('runtime_constraints', {})
+
+
class JobSummarizer(ProcessSummarizer):
    # Jobs state memory constraints in MiB; multiplier to convert the
    # generic (byte-based) value into the job API's unit (2**20 bytes).
    runtime_constraint_mem_unit = 1048576
    # Map generic runtime-constraint names to the job API's field names.
    map_runtime_constraint = {
        'keep_cache_ram': 'keep_cache_mb_per_task',
        'ram': 'min_ram_mb_per_node',
        'vcpus': 'min_cores_per_node',
    }
+
+
class ContainerSummarizer(ProcessSummarizer):
    # Containers state memory constraints in bytes, so no unit conversion
    # is needed (contrast with JobSummarizer's MiB multiplier).
    runtime_constraint_mem_unit = 1
+
+
class MultiSummarizer(object):
    """Run and report on a collection of child summarizers.

    Children run concurrently, at most `threads` at a time; reports are
    the concatenation of the descendants' reports.
    """

    def __init__(self, children=None, label=None, threads=1, **kwargs):
        # `children=None` instead of a mutable `{}` default: the shared
        # default dict would be aliased (and mutable) across every
        # instance constructed without an explicit children argument.
        self.children = children if children is not None else {}
        self.throttle = threading.Semaphore(threads)
        self.label = label

    def run_and_release(self, target, *args, **kwargs):
        """Call target(*args, **kwargs), releasing the throttle when done."""
        try:
            return target(*args, **kwargs)
        finally:
            self.throttle.release()

    def run(self):
        """Run every child's run() concurrently and wait for all to finish.

        The semaphore limits the number of children running at once.
        """
        threads = []
        for child in self.children.values():
            self.throttle.acquire()
            t = threading.Thread(target=self.run_and_release, args=(child.run, ))
            t.daemon = True
            t.start()
            threads.append(t)
        for t in threads:
            t.join()

    def text_report(self):
        """Return the concatenated text reports of all descendants.

        A per-child heading is emitted only when there is more than one
        descendant to distinguish.
        """
        txt = ''
        d = self._descendants()
        for child in d.values():
            if len(d) > 1:
                txt += '### Summary for {} ({})\n'.format(
                    child.label, child.process['uuid'])
            txt += child.text_report()
            txt += '\n'
        return txt

    def _descendants(self):
        """Dict of self and all descendants.

        Nodes with nothing of their own to report (like
        MultiSummarizers) are omitted.
        """
        d = collections.OrderedDict()
        for key, child in self.children.items():
            if isinstance(child, Summarizer):
                d[key] = child
            if isinstance(child, MultiSummarizer):
                d.update(child._descendants())
        return d

    def html_report(self):
        """Return an HTML report of all descendants via WEBCHART_CLASS."""
        return WEBCHART_CLASS(self.label, self._descendants().values()).html()
+
+
class JobTreeSummarizer(MultiSummarizer):
    """Summarizes a job and all children listed in its components field."""

    def __init__(self, job, label=None, **kwargs):
        arv = arvados.api('v1', model=OrderedJsonModel())
        if not label:
            label = job.get('name', job['uuid'])
        children = collections.OrderedDict()
        children[job['uuid']] = JobSummarizer(job, label=label, **kwargs)
        components = job.get('components', None)
        if components:
            # Fetch all component jobs with one index call; any record the
            # index did not return is fetched individually below.
            listing = arv.jobs().index(
                limit=len(components),
                filters=[['uuid','in',components.values()]]).execute()
            preloaded = dict((j['uuid'], j) for j in listing['items'])
            for cname in sorted(components.keys()):
                child_uuid = components[cname]
                child_job = (preloaded.get(child_uuid) or
                             arv.jobs().get(uuid=child_uuid).execute())
                children[child_uuid] = JobTreeSummarizer(
                    job=child_job, label=cname, **kwargs)

        super(JobTreeSummarizer, self).__init__(
            children=children,
            label=label,
            **kwargs)
+
+
+class PipelineSummarizer(MultiSummarizer):
+ def __init__(self, instance, **kwargs):
+ children = collections.OrderedDict()