logging.getLogger().addHandler(logging.StreamHandler())
args = crunchstat_summary.command.ArgumentParser().parse_args(sys.argv[1:])
-s = crunchstat_summary.command.Command(args).summarizer()
-s.run()
-print(s.report(), end='')
+cmd = crunchstat_summary.command.Command(args)
+cmd.run()
+print(cmd.report(), end='')
--- /dev/null
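+// Render the report charts: chartData is defined inline by
+// crunchstat_summary.chartjs.ChartJS.js(); one CanvasJS chart (and a div to
+// hold it) is created per entry when the page loads.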
+window.onload = function() {
+    chartData.forEach(function(data, idx) {
+        var div = document.createElement('div');
+        div.setAttribute('id', 'chart-'+idx);
+        div.setAttribute('style', 'width: 100%; height: 150px');
+        document.body.appendChild(div);
+        var chart = new CanvasJS.Chart('chart-'+idx, data);
+        chart.render();
+    });
+};
--- /dev/null
+from __future__ import print_function
+
+import json
+import pkg_resources
+
+
+class ChartJS(object):
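+    '''Render an HTML page of CanvasJS line charts from per-task stat series.'''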
+ JSLIB = 'https://cdnjs.cloudflare.com/ajax/libs/canvasjs/1.7.0/canvasjs.js'
+
+ def __init__(self, label, tasks):
+ self.label = label
+ self.tasks = tasks
+
+ def html(self):
+ return '''<!doctype html><html><head>
+ <title>{} stats</title>
+ <script type="text/javascript" src="{}"></script>
+ <script type="text/javascript">{}</script>
+ </head><body></body></html>
+ '''.format(self.label, self.JSLIB, self.js())
+
+ def js(self):
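+        '''Return inline JavaScript: the chart data followed by the rendering
+        code shipped as chartjs.js.'''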
+ return 'var chartData = {};\n{}'.format(
+ json.dumps(self.chartData()),
+ pkg_resources.resource_string('crunchstat_summary', 'chartjs.js'))
+
+ def chartData(self):
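+        '''Return one CanvasJS chart config per plotted stat, each with a line
+        of (elapsed seconds, value) points for every task.'''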
+ return [
+ {
+ 'title': {
+ 'text': '{}: {} {}'.format(self.label, stat[0], stat[1]),
+ },
+ 'data': [
+ {
+ 'type': 'line',
+ 'dataPoints': [
+ {'x': pt[0].total_seconds(), 'y': pt[1]}
+ for pt in task.series[stat]]
+ }
+ for label, task in self.tasks.iteritems()
+ ],
+ }
+ for stat in (('cpu', 'user+sys__rate'),
+ ('net:eth0', 'tx+rx__rate'),
+ ('mem', 'rss'))
+ ]
src.add_argument(
'--log-file', type=str,
help='Read log data from a regular file')
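+    # --format html writes a standalone page of charts; --format text (the
+    # default) keeps the tab-separated report.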
+ self.add_argument(
+ '--format', type=str, choices=('html', 'text'), default='text',
+ help='Report format')
class Command(object):
def __init__(self, args):
self.args = args
- def summarizer(self):
+ def run(self):
if self.args.pipeline_instance:
- return summarizer.PipelineSummarizer(self.args.pipeline_instance)
+ self.summer = summarizer.PipelineSummarizer(self.args.pipeline_instance)
elif self.args.job:
- return summarizer.JobSummarizer(self.args.job)
+ self.summer = summarizer.JobSummarizer(self.args.job)
elif self.args.log_file:
if self.args.log_file.endswith('.gz'):
fh = gzip.open(self.args.log_file)
else:
fh = open(self.args.log_file)
- return summarizer.Summarizer(fh)
+ self.summer = summarizer.Summarizer(fh)
else:
- return summarizer.Summarizer(sys.stdin)
+ self.summer = summarizer.Summarizer(sys.stdin)
+ return self.summer.run()
+
+ def report(self):
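+        '''Render the summary gathered by run() in the format chosen with --format.'''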
+ if self.args.format == 'html':
+ return self.summer.html_report()
+ elif self.args.format == 'text':
+ return self.summer.text_report()
import arvados
import collections
+import crunchstat_summary.chartjs
+import datetime
import functools
import itertools
import logging
# that have amounts like 7.5 GiB according to the kernel.)
AVAILABLE_RAM_RATIO = 0.95
+
+class Task(object):
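+    '''Start time and raw stat time series for one task, keyed by (category, stat).'''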
+ def __init__(self):
+ self.starttime = None
+ self.series = collections.defaultdict(list)
+
+
class Summarizer(object):
existing_constraints = {}
# task_stats: {task_id: {category: {stat: val}}}
self.task_stats = collections.defaultdict(
functools.partial(collections.defaultdict, dict))
+ self.tasks = collections.defaultdict(Task)
for line in self._logdata:
m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) success in (?P<elapsed>\d+) seconds', line)
if m:
if elapsed > self.stats_max['time']['elapsed']:
self.stats_max['time']['elapsed'] = elapsed
continue
- m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) stderr crunchstat: (?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n', line)
+ m = re.search(r'^(?P<timestamp>\S+) \S+ \d+ (?P<seq>\d+) stderr crunchstat: (?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n', line)
if not m:
continue
if m.group('category').endswith(':'):
elif m.group('category') == 'error':
continue
task_id = m.group('seq')
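+            # Each crunchstat line carries a %Y-%m-%d_%H:%M:%S timestamp; the
+            # first one seen per task becomes its start time so samples can be
+            # plotted as offsets from task start.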
+ timestamp = datetime.datetime.strptime(
+ m.group('timestamp'), '%Y-%m-%d_%H:%M:%S')
+ task = self.tasks[task_id]
+ if not task.starttime:
+ task.starttime = timestamp
this_interval_s = None
for group in ['current', 'interval']:
if not m.group(group):
else:
stat = stat + '__rate'
val = val / this_interval_s
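+                        # Keep rate samples over time for the HTML charts.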
+ if stat in ['user+sys__rate', 'tx+rx__rate']:
+ task.series[category, stat].append(
+ (timestamp - task.starttime, val))
else:
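+                    # Keep memory (rss) samples over time for the HTML charts.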
+ if stat in ['rss']:
+ task.series[category, stat].append(
+ (timestamp - task.starttime, val))
self.task_stats[task_id][category][stat] = val
if val > self.stats_max[category][stat]:
self.stats_max[category][stat] = val
continue
self.job_tot[category][stat] += val
- def report(self):
+ def text_report(self):
return "\n".join(itertools.chain(
- self._report_gen(),
+ self._text_report_gen(),
self._recommend_gen())) + "\n"
- def _report_gen(self):
+ def html_report(self):
+ return crunchstat_summary.chartjs.ChartJS(self.label, self.tasks).html()
+
+ def _text_report_gen(self):
yield "\t".join(['category', 'metric', 'task_max', 'task_max_rate', 'job_total'])
for category, stat_max in sorted(self.stats_max.iteritems()):
for stat, val in sorted(stat_max.iteritems()):
for summarizer in self.summarizers.itervalues():
summarizer.run()
- def report(self):
+ def text_report(self):
txt = ''
for cname, summarizer in self.summarizers.iteritems():
txt += '### Summary for {} ({})\n'.format(
cname, summarizer.job['uuid'])
- txt += summarizer.report()
+ txt += summarizer.text_report()
txt += '\n'
return txt
import arvados
import collections
import crunchstat_summary.command
-import crunchstat_summary.summarizer
import difflib
import glob
import gzip
import os
import unittest
-
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
+
class ReportDiff(unittest.TestCase):
- def diff_known_report(self, logfile, summarizer):
+ def diff_known_report(self, logfile, cmd):
expectfile = logfile+'.report'
expect = open(expectfile).readlines()
- self.diff_report(summarizer, expect, expectfile=expectfile)
+ self.diff_report(cmd, expect, expectfile=expectfile)
- def diff_report(self, summarizer, expect, expectfile=None):
- got = [x+"\n" for x in summarizer.report().strip("\n").split("\n")]
+ def diff_report(self, cmd, expect, expectfile=None):
+ got = [x+"\n" for x in cmd.report().strip("\n").split("\n")]
self.assertEqual(got, expect, "\n"+"".join(difflib.context_diff(
expect, got, fromfile=expectfile, tofile="(generated)")))
logfile = os.path.join(TESTS_DIR, fnm)
args = crunchstat_summary.command.ArgumentParser().parse_args(
['--log-file', logfile])
- summarizer = crunchstat_summary.command.Command(args).summarizer()
- summarizer.run()
- self.diff_known_report(logfile, summarizer)
+ cmd = crunchstat_summary.command.Command(args)
+ cmd.run()
+ self.diff_known_report(logfile, cmd)
class SummarizeJob(ReportDiff):
mock_cr().open.return_value = gzip.open(self.logfile)
args = crunchstat_summary.command.ArgumentParser().parse_args(
['--job', self.fake_job_uuid])
- summarizer = crunchstat_summary.command.Command(args).summarizer()
- summarizer.run()
- self.diff_known_report(self.logfile, summarizer)
+ cmd = crunchstat_summary.command.Command(args)
+ cmd.run()
+ self.diff_known_report(self.logfile, cmd)
mock_api().jobs().get.assert_called_with(uuid=self.fake_job_uuid)
mock_cr.assert_called_with(self.fake_log_id)
mock_cr().open.assert_called_with('fake-logfile.txt')
mock_cr().open.side_effect = [gzip.open(logfile) for _ in range(3)]
args = crunchstat_summary.command.ArgumentParser().parse_args(
['--pipeline-instance', self.fake_instance['uuid']])
- summarizer = crunchstat_summary.command.Command(args).summarizer()
- summarizer.run()
+ cmd = crunchstat_summary.command.Command(args)
+ cmd.run()
job_report = [
line for line in open(logfile+'.report').readlines()
job_report + ['\n'] +
['### Summary for baz (zzzzz-8i9sb-000000000000002)\n'] +
job_report)
- self.diff_report(summarizer, expect)
+ self.diff_report(cmd, expect)
mock_cr.assert_has_calls(
[
mock.call('fake-log-pdh-0'),