X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/6699c983ac7c8506d17a60323ba71478ea2ac20d..3854e6bfcd5344bce5ee0388248cb115e3c5e902:/tools/crunchstat-summary/crunchstat_summary/summarizer.py?ds=sidebyside

diff --git a/tools/crunchstat-summary/crunchstat_summary/summarizer.py b/tools/crunchstat-summary/crunchstat_summary/summarizer.py
index 60f1f70f60..f692469436 100644
--- a/tools/crunchstat-summary/crunchstat_summary/summarizer.py
+++ b/tools/crunchstat-summary/crunchstat_summary/summarizer.py
@@ -41,9 +41,10 @@ class Task(object):
 
 class Summarizer(object):
-    def __init__(self, logdata, label=None, skip_child_jobs=False):
+    def __init__(self, logdata, label=None, skip_child_jobs=False, uuid=None, **kwargs):
         self._logdata = logdata
 
+        self.uuid = uuid
         self.label = label
         self.starttime = None
         self.finishtime = None
@@ -69,55 +70,59 @@ class Summarizer(object):
 
     def run(self):
         logger.debug("%s: parsing logdata %s", self.label, self._logdata)
-        for line in self._logdata:
-            m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) job_task (?P<task_uuid>\S+)$', line)
-            if m:
-                seq = int(m.group('seq'))
-                uuid = m.group('task_uuid')
-                self.seq_to_uuid[seq] = uuid
-                logger.debug('%s: seq %d is task %s', self.label, seq, uuid)
-                continue
+        with self._logdata as logdata:
+            self._run(logdata)
 
-            m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) (success in|failure \(#., permanent\) after) (?P<elapsed>\d+) seconds', line)
-            if m:
-                task_id = self.seq_to_uuid[int(m.group('seq'))]
-                elapsed = int(m.group('elapsed'))
-                self.task_stats[task_id]['time'] = {'elapsed': elapsed}
-                if elapsed > self.stats_max['time']['elapsed']:
-                    self.stats_max['time']['elapsed'] = elapsed
-                continue
+    def _run(self, logdata):
+        self.detected_crunch1 = False
+        for line in logdata:
+            if not self.detected_crunch1 and '-8i9sb-' in line:
+                self.detected_crunch1 = True
 
-            m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) stderr Queued job (?P<uuid>\S+)$', line)
-            if m:
-                uuid = m.group('uuid')
-                if self._skip_child_jobs:
-                    logger.warning('%s: omitting stats from child job %s'
-                                   ' because --skip-child-jobs flag is on',
-                                   self.label, uuid)
+            if self.detected_crunch1:
+                m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) job_task (?P<task_uuid>\S+)$', line)
+                if m:
+                    seq = int(m.group('seq'))
+                    uuid = m.group('task_uuid')
+                    self.seq_to_uuid[seq] = uuid
+                    logger.debug('%s: seq %d is task %s', self.label, seq, uuid)
                     continue
-                logger.debug('%s: follow %s', self.label, uuid)
-                child_summarizer = ProcessSummarizer(uuid)
-                child_summarizer.stats_max = self.stats_max
-                child_summarizer.task_stats = self.task_stats
-                child_summarizer.tasks = self.tasks
-                child_summarizer.starttime = self.starttime
-                child_summarizer.run()
-                logger.debug('%s: done %s', self.label, uuid)
-                continue
-            m = re.search(r'^(?P<timestamp>[^\s.]+)(\.\d+)? (?P<job_uuid>\S+) \d+ (?P<seq>\d+) stderr crunchstat: (?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n$', line)
-            if m:
-                # crunch1 job
-                task_id = self.seq_to_uuid[int(m.group('seq'))]
+                m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) (success in|failure \(#., permanent\) after) (?P<elapsed>\d+) seconds', line)
+                if m:
+                    task_id = self.seq_to_uuid[int(m.group('seq'))]
+                    elapsed = int(m.group('elapsed'))
+                    self.task_stats[task_id]['time'] = {'elapsed': elapsed}
+                    if elapsed > self.stats_max['time']['elapsed']:
+                        self.stats_max['time']['elapsed'] = elapsed
+                    continue
+
+                m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) stderr Queued job (?P<uuid>\S+)$', line)
+                if m:
+                    uuid = m.group('uuid')
+                    if self._skip_child_jobs:
+                        logger.warning('%s: omitting stats from child job %s'
+                                       ' because --skip-child-jobs flag is on',
+                                       self.label, uuid)
+                        continue
+                    logger.debug('%s: follow %s', self.label, uuid)
+                    child_summarizer = ProcessSummarizer(uuid)
+                    child_summarizer.stats_max = self.stats_max
+                    child_summarizer.task_stats = self.task_stats
+                    child_summarizer.tasks = self.tasks
+                    child_summarizer.starttime = self.starttime
+                    child_summarizer.run()
+                    logger.debug('%s: done %s', self.label, uuid)
+                    continue
+
+                m = re.search(r'^(?P<timestamp>[^\s.]+)(\.\d+)? (?P<job_uuid>\S+) \d+ (?P<seq>\d+) stderr crunchstat: (?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n$', line)
+                if not m:
+                    continue
             else:
+                # crunch2
                 m = re.search(r'^(?P<timestamp>\S+) (?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n$', line)
-                if m:
-                    # crunch2 container (seq/task don't apply)
-                    task_id = 'container'
-                else:
-                    # not a crunchstat log
+                if not m:
                     continue
-
-            task = self.tasks[task_id]
 
             if self.label is None:
                 try:
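The crunch1 crunchstat pattern restored above is dense, so it helps to exercise it in isolation. A minimal sketch, assuming an invented log line (the UUID and all numbers are made up for illustration):

    import re

    # Hypothetical crunch1-style log line, invented for this example.
    line = ('2016-03-01_01:02:03 zzzzz-8i9sb-xxxxxxxxxxxxxxx 1234 5 '
            'stderr crunchstat: cpu 69.1920 user 3.5180 sys 8 cpus '
            '-- interval 10.0000 seconds 0.3570 user 0.0560 sys\n')

    m = re.search(r'^(?P<timestamp>[^\s.]+)(\.\d+)? (?P<job_uuid>\S+) \d+ '
                  r'(?P<seq>\d+) stderr crunchstat: (?P<category>\S+) '
                  r'(?P<current>.*?)( -- interval (?P<interval>.*))?\n$', line)

    print(m.group('timestamp'))  # 2016-03-01_01:02:03
    print(m.group('seq'))        # 5
    print(m.group('category'))   # cpu
    print(m.group('current'))    # 69.1920 user 3.5180 sys 8 cpus
    print(m.group('interval'))   # 10.0000 seconds 0.3570 user 0.0560 sys

The optional (\.\d+)? after the timestamp tolerates fractional seconds, and the non-greedy (?P<current>.*?) stops at the " -- interval" marker when one is present.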
@@ -129,11 +134,17 @@ class Summarizer(object):
                 continue
             elif m.group('category') in ('error', 'caught'):
                 continue
-            elif m.group('category') == 'read':
+            elif m.group('category') in ('read', 'open', 'cgroup', 'CID', 'Running'):
                 # "stderr crunchstat: read /proc/1234/net/dev: ..."
-                # (crunchstat formatting fixed, but old logs still say this)
+                # (old logs are less careful with unprefixed error messages)
                 continue
+
+            if self.detected_crunch1:
+                task_id = self.seq_to_uuid[int(m.group('seq'))]
+            else:
+                task_id = 'container'
+            task = self.tasks[task_id]
+
             # Use the first and last crunchstat timestamps as
             # approximations of starttime and finishtime.
             timestamp = m.group('timestamp')
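The six added lines above are the crux of the dual-format support: crunch1 lines are attributed to a real task via seq_to_uuid, while crunch2 container logs all land in one synthetic 'container' bucket. The same decision in isolation (a sketch, with names taken from the patch):

    # Sketch of the accounting decision added above.
    def pick_task_id(m, detected_crunch1, seq_to_uuid):
        if detected_crunch1:
            # crunch1: the 'seq' group maps a task sequence number to its UUID
            return seq_to_uuid[int(m.group('seq'))]
        # crunch2: a container has no subtasks, so use one synthetic bucket
        return 'container'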
@@ -171,8 +182,10 @@ class Summarizer(object):
                     else:
                         stats[stat] = int(val)
                 except ValueError as e:
-                    logger.warning('Error parsing {} stat: {!r}'.format(
-                        stat, e))
+                    logger.warning(
+                        'Error parsing value %r (stat %r, category %r): %r',
+                        val, stat, category, e)
+                    logger.warning('%s', line)
                     continue
             if 'user' in stats or 'sys' in stats:
                 stats['user+sys'] = stats.get('user', 0) + stats.get('sys', 0)
@@ -216,6 +229,8 @@ class Summarizer(object):
 
     def long_label(self):
         label = self.label
+        if hasattr(self, 'process') and self.process['uuid'] not in label:
+            label = '{} ({})'.format(label, self.process['uuid'])
         if self.finishtime:
             label += ' -- elapsed time '
             s = (self.finishtime - self.starttime).total_seconds()
@@ -300,19 +315,21 @@ class Summarizer(object):
 
     def _recommend_cpu(self):
         """Recommend asking for 4 cores if max CPU usage was 333%"""
 
+        constraint_key = self._map_runtime_constraint('vcpus')
         cpu_max_rate = self.stats_max['cpu']['user+sys__rate']
         if cpu_max_rate == float('-Inf'):
             logger.warning('%s: no CPU usage data', self.label)
             return
         used_cores = max(1, int(math.ceil(cpu_max_rate)))
-        asked_cores = self.existing_constraints.get('min_cores_per_node')
+        asked_cores = self.existing_constraints.get(constraint_key)
         if asked_cores is None or used_cores < asked_cores:
             yield (
                 '#!! {} max CPU usage was {}% -- '
-                'try runtime_constraints "min_cores_per_node":{}'
+                'try runtime_constraints "{}":{}'
             ).format(
                 self.label,
                 int(math.ceil(cpu_max_rate*100)),
+                constraint_key,
                 int(used_cores))
 
     def _recommend_ram(self):
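The arithmetic in _recommend_cpu is small enough to check by hand; the docstring's 333% example works out as follows (a standalone sketch):

    import math

    cpu_max_rate = 3.33  # peak usage of 333%, i.e. 3.33 cores busy at once
    used_cores = max(1, int(math.ceil(cpu_max_rate)))  # -> 4
    pct = int(math.ceil(cpu_max_rate * 100))           # -> 333
    # Via _map_runtime_constraint, a crunch1 job is told to set
    # "min_cores_per_node":4 and a crunch2 container "vcpus":4.
    print('max CPU usage was {}% -- try {} cores'.format(pct, used_cores))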
@@ -349,40 +366,44 @@ class Summarizer(object):
         the memory we want -- even if that happens to be 8192 MiB.
         """
 
+        constraint_key = self._map_runtime_constraint('ram')
         used_bytes = self.stats_max['mem']['rss']
         if used_bytes == float('-Inf'):
             logger.warning('%s: no memory usage data', self.label)
             return
         used_mib = math.ceil(float(used_bytes) / 1048576)
-        asked_mib = self.existing_constraints.get('min_ram_mb_per_node')
+        asked_mib = self.existing_constraints.get(constraint_key)
 
         nearlygibs = lambda mebibytes: mebibytes/AVAILABLE_RAM_RATIO/1024
         if asked_mib is None or (
                 math.ceil(nearlygibs(used_mib)) < nearlygibs(asked_mib)):
             yield (
                 '#!! {} max RSS was {} MiB -- '
-                'try runtime_constraints "min_ram_mb_per_node":{}'
+                'try runtime_constraints "{}":{}'
             ).format(
                 self.label,
                 int(used_mib),
-                int(math.ceil(nearlygibs(used_mib))*AVAILABLE_RAM_RATIO*1024))
+                constraint_key,
+                int(math.ceil(nearlygibs(used_mib))*AVAILABLE_RAM_RATIO*1024*(2**20)/self._runtime_constraint_mem_unit()))
 
     def _recommend_keep_cache(self):
         """Recommend increasing keep cache if utilization < 80%"""
+        constraint_key = self._map_runtime_constraint('keep_cache_ram')
         if self.job_tot['net:keep0']['rx'] == 0:
             return
         utilization = (float(self.job_tot['blkio:0:0']['read']) /
                        float(self.job_tot['net:keep0']['rx']))
-        asked_mib = self.existing_constraints.get('keep_cache_mb_per_task', 256)
+        asked_mib = self.existing_constraints.get(constraint_key, 256)
 
         if utilization < 0.8:
             yield (
                 '#!! {} Keep cache utilization was {:.2f}% -- '
-                'try runtime_constraints "keep_cache_mb_per_task":{} (or more)'
+                'try runtime_constraints "{}":{} (or more)'
             ).format(
                 self.label,
                 utilization * 100.0,
-                asked_mib*2)
+                constraint_key,
+                asked_mib*2*(2**20)/self._runtime_constraint_mem_unit())
 
     def _format(self, val):
@@ -394,6 +415,22 @@ class Summarizer(object):
         else:
             return '{}'.format(val)
 
+    def _runtime_constraint_mem_unit(self):
+        if hasattr(self, 'runtime_constraint_mem_unit'):
+            return self.runtime_constraint_mem_unit
+        elif self.detected_crunch1:
+            return JobSummarizer.runtime_constraint_mem_unit
+        else:
+            return ContainerSummarizer.runtime_constraint_mem_unit
+
+    def _map_runtime_constraint(self, key):
+        if hasattr(self, 'map_runtime_constraint'):
+            return self.map_runtime_constraint[key]
+        elif self.detected_crunch1:
+            return JobSummarizer.map_runtime_constraint[key]
+        else:
+            return key
+
 
 class CollectionSummarizer(Summarizer):
     def __init__(self, collection_id, **kwargs):
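Because _recommend_ram now divides by _runtime_constraint_mem_unit(), the same measurement is reported in MiB for crunch1 jobs and in bytes for crunch2 containers. Worked by hand (AVAILABLE_RAM_RATIO is defined near the top of this file; the 0.95 used here is an assumed value for illustration):

    import math

    AVAILABLE_RAM_RATIO = 0.95  # assumed for this example
    nearlygibs = lambda mebibytes: mebibytes/AVAILABLE_RAM_RATIO/1024

    used_mib = 7000.0                        # peak RSS, in MiB
    gibs = math.ceil(nearlygibs(used_mib))   # -> 8 "nearly-GiB" chunks
    mib = gibs * AVAILABLE_RAM_RATIO * 1024  # -> 7782.4 MiB recommended
    print(int(mib * (2**20) / 1048576))  # job:       min_ram_mb_per_node 7782
    print(int(mib * (2**20) / 1))        # container: ram 8160437862 (bytes)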
@@ -402,86 +439,91 @@ class CollectionSummarizer(Summarizer):
         self.label = collection_id
 
 
-def NewSummarizer(process, **kwargs):
+def NewSummarizer(process_or_uuid, **kwargs):
     """Construct with the appropriate subclass for this uuid/object."""
 
-    if not isinstance(process, dict):
-        uuid = process
+    if isinstance(process_or_uuid, dict):
+        process = process_or_uuid
+        uuid = process['uuid']
+    else:
+        uuid = process_or_uuid
         process = None
         arv = arvados.api('v1', model=OrderedJsonModel())
 
-    if re.search('-dz642-', uuid):
+    if '-dz642-' in uuid:
         if process is None:
             process = arv.containers().get(uuid=uuid).execute()
-        return ContainerSummarizer(process, **kwargs)
-    elif re.search('-xvhdp-', uuid):
+        klass = ContainerTreeSummarizer
+    elif '-xvhdp-' in uuid:
         if process is None:
-            ctrReq = arv.container_requests().get(uuid=uuid).execute()
-            ctrUUID = ctrReq['container_uuid']
-            process = arv.containers().get(uuid=ctrUUID).execute()
-        return ContainerSummarizer(process, **kwargs)
-    elif re.search('-8i9sb-', uuid):
+            process = arv.container_requests().get(uuid=uuid).execute()
+        klass = ContainerTreeSummarizer
+    elif '-8i9sb-' in uuid:
        if process is None:
             process = arv.jobs().get(uuid=uuid).execute()
-        return JobSummarizer(process, **kwargs)
-    elif re.search('-d1hrv-', uuid):
+        klass = JobTreeSummarizer
+    elif '-d1hrv-' in uuid:
         if process is None:
             process = arv.pipeline_instances().get(uuid=uuid).execute()
-        return PipelineSummarizer(process, **kwargs)
+        klass = PipelineSummarizer
+    elif '-4zz18-' in uuid:
+        return CollectionSummarizer(collection_id=uuid)
     else:
         raise ArgumentError("Unrecognized uuid %s", uuid)
+    return klass(process, uuid=uuid, **kwargs)
 
 
 class ProcessSummarizer(Summarizer):
     """Process is a job, pipeline, container, or container request."""
 
-    def __init__(self, process, **kwargs):
+    def __init__(self, process, label=None, **kwargs):
         rdr = None
         self.process = process
+        if label is None:
+            label = self.process.get('name', self.process['uuid'])
         if self.process.get('log'):
             try:
                 rdr = crunchstat_summary.reader.CollectionReader(self.process['log'])
             except arvados.errors.NotFoundError as e:
                 logger.warning("Trying event logs after failing to read "
                                "log collection %s: %s", self.process['log'], e)
-            else:
-                label = self.process['uuid']
         if rdr is None:
             rdr = crunchstat_summary.reader.LiveLogReader(self.process['uuid'])
-            label = self.process['uuid'] + ' (partial)'
-        super(ProcessSummarizer, self).__init__(rdr, **kwargs)
-        self.label = label
+            label = label + ' (partial)'
+        super(ProcessSummarizer, self).__init__(rdr, label=label, **kwargs)
         self.existing_constraints = self.process.get('runtime_constraints', {})
 
 
 class JobSummarizer(ProcessSummarizer):
-    pass
+    runtime_constraint_mem_unit = 1048576
+    map_runtime_constraint = {
+        'keep_cache_ram': 'keep_cache_mb_per_task',
+        'ram': 'min_ram_mb_per_node',
+        'vcpus': 'min_cores_per_node',
+    }
 
 
 class ContainerSummarizer(ProcessSummarizer):
-    pass
+    runtime_constraint_mem_unit = 1
 
 
-class PipelineSummarizer(object):
-    def __init__(self, instance, **kwargs):
-        self.summarizers = collections.OrderedDict()
-        for cname, component in instance['components'].iteritems():
-            if 'job' not in component:
-                logger.warning(
-                    "%s: skipping component with no job assigned", cname)
-            else:
-                logger.info(
-                    "%s: job %s", cname, component['job']['uuid'])
-                summarizer = JobSummarizer(component['job'], **kwargs)
-                summarizer.label = '{} {}'.format(
-                    cname, component['job']['uuid'])
-                self.summarizers[cname] = summarizer
-        self.label = instance['uuid']
+class MultiSummarizer(object):
+    def __init__(self, children={}, label=None, threads=1, **kwargs):
+        self.throttle = threading.Semaphore(threads)
+        self.children = children
+        self.label = label
+
+    def run_and_release(self, target, *args, **kwargs):
+        try:
+            return target(*args, **kwargs)
+        finally:
+            self.throttle.release()
 
     def run(self):
         threads = []
-        for summarizer in self.summarizers.itervalues():
-            t = threading.Thread(target=summarizer.run)
+        for child in self.children.itervalues():
+            self.throttle.acquire()
+            t = threading.Thread(target=self.run_and_release, args=(child.run, ))
             t.daemon = True
             t.start()
             threads.append(t)
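The Semaphore in MultiSummarizer is a bounded fan-out: acquire a slot before spawning each child, release it when the child finishes, so at most `threads` children parse logs concurrently. The pattern in isolation (a runnable sketch; the Child class is invented):

    import threading
    import time

    class Child(object):
        def run(self):
            time.sleep(0.1)  # stand-in for real log parsing

    throttle = threading.Semaphore(4)  # at most 4 children in flight

    def run_and_release(target):
        try:
            target()
        finally:
            throttle.release()  # free the slot even if target() raises

    threads = []
    for child in [Child() for _ in range(10)]:
        throttle.acquire()  # blocks while 4 children are already running
        t = threading.Thread(target=run_and_release, args=(child.run,))
        t.daemon = True
        t.start()
        threads.append(t)
    for t in threads:
        t.join()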
+ """ + d = collections.OrderedDict() + for key, child in self.children.iteritems(): + if isinstance(child, Summarizer): + d[key] = child + if isinstance(child, MultiSummarizer): + d.update(child._descendants()) + return d + def html_report(self): - return WEBCHART_CLASS(self.label, self.summarizers.itervalues()).html() + return WEBCHART_CLASS(self.label, self._descendants().itervalues()).html() + + +class JobTreeSummarizer(MultiSummarizer): + """Summarizes a job and all children listed in its components field.""" + def __init__(self, job, label=None, **kwargs): + arv = arvados.api('v1', model=OrderedJsonModel()) + label = label or job.get('name', job['uuid']) + children = collections.OrderedDict() + children[job['uuid']] = JobSummarizer(job, label=label, **kwargs) + if job.get('components', None): + preloaded = {} + for j in arv.jobs().index( + limit=len(job['components']), + filters=[['uuid','in',job['components'].values()]]).execute()['items']: + preloaded[j['uuid']] = j + for cname in sorted(job['components'].keys()): + child_uuid = job['components'][cname] + j = (preloaded.get(child_uuid) or + arv.jobs().get(uuid=child_uuid).execute()) + children[child_uuid] = JobTreeSummarizer(job=j, label=cname, **kwargs) + + super(JobTreeSummarizer, self).__init__( + children=children, + label=label, + **kwargs) + + +class PipelineSummarizer(MultiSummarizer): + def __init__(self, instance, **kwargs): + children = collections.OrderedDict() + for cname, component in instance['components'].iteritems(): + if 'job' not in component: + logger.warning( + "%s: skipping component with no job assigned", cname) + else: + logger.info( + "%s: job %s", cname, component['job']['uuid']) + summarizer = JobTreeSummarizer(component['job'], label=cname, **kwargs) + summarizer.label = '{} {}'.format( + cname, component['job']['uuid']) + children[cname] = summarizer + super(PipelineSummarizer, self).__init__( + children=children, + label=instance['uuid'], + **kwargs) + + +class ContainerTreeSummarizer(MultiSummarizer): + def __init__(self, root, **kwargs): + arv = arvados.api('v1', model=OrderedJsonModel()) + + label = kwargs.pop('label', None) or root.get('name') or root['uuid'] + root['name'] = label + + children = collections.OrderedDict() + todo = collections.deque((root, )) + while len(todo) > 0: + current = todo.popleft() + label = current['name'] + sort_key = current['created_at'] + if current['uuid'].find('-xvhdp-') > 0: + current = arv.containers().get(uuid=current['container_uuid']).execute() + + summer = ContainerSummarizer(current, label=label, **kwargs) + summer.sort_key = sort_key + children[current['uuid']] = summer + + page_filters = [] + while True: + items = arv.container_requests().index( + order=['uuid asc'], + filters=page_filters+[ + ['requesting_container_uuid', '=', current['uuid']]], + ).execute()['items'] + if not items: + break + page_filters = [['uuid', '>', items[-1]['uuid']]] + for cr in items: + if cr['container_uuid']: + logger.debug('%s: container req %s', current['uuid'], cr['uuid']) + cr['name'] = cr.get('name') or cr['uuid'] + todo.append(cr) + sorted_children = collections.OrderedDict() + for uuid in sorted(children.keys(), key=lambda uuid: children[uuid].sort_key): + sorted_children[uuid] = children[uuid] + super(ContainerTreeSummarizer, self).__init__( + children=sorted_children, + label=root['name'], + **kwargs)