import cgi
import json
+import math
import pkg_resources
from crunchstat_summary import logger
}
for s in self.summarizers]
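+ # Build Y-axis options from the largest value of this stat across all tasks,
+ # so every task's line in the chart is plotted against the same scale.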
+ def _axisY(self, tasks, stat):
+ ymax = 1
+ for task in tasks.itervalues():
+ for pt in task.series[stat]:
+ ymax = max(ymax, pt[1])
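+ # Grid interval: a quarter of the power of two just above ymax,
+ # i.e. 2**(1+floor(log2(ymax)))/4.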
+ ytick = math.exp((1+math.floor(math.log(ymax, 2)))*math.log(2))/4
+ return {
+ 'gridColor': '#cccccc',
+ 'gridThickness': 1,
+ 'interval': ytick,
+ 'minimum': 0,
+ 'maximum': ymax,
+ 'valueFormatString': "''",
+ }
+
def charts(self, label, tasks):
return [
{
- 'axisY': {
- 'minimum': 0,
- },
+ 'axisY': self._axisY(tasks=tasks, stat=stat),
'data': [
{
'type': 'line',
"collection {} has {} files; need exactly one".format(
collection_id, len(filenames)))
self._reader = collection.open(filenames[0])
+ self._label = "{}/{}".format(collection_id, filenames[0])
+
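+ # Readers are interpolated into log messages with "%s", so give them a
+ # readable label.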
+ def __str__(self):
+ return self._label
def __iter__(self):
return iter(self._reader)
def __init__(self, job_uuid):
logger.debug('load stderr events for job %s', job_uuid)
- self._filters = [
- ['object_uuid', '=', job_uuid],
- ['event_type', '=', 'stderr']]
- self._label = job_uuid
+ self.job_uuid = job_uuid
+
+ def __str__(self):
+ return self.job_uuid
def _get_all_pages(self):
got = 0
last_id = 0
- while True:
- page = arvados.api().logs().index(
- limit=1000,
- order=['id asc'],
- filters=self._filters + [['id','>',str(last_id)]],
- ).execute(num_retries=2)
- got += len(page['items'])
- logger.debug(
- '%s: received %d of %d log events',
- self._label, got,
- got + page['items_available'] - len(page['items']))
- for i in page['items']:
- for line in i['properties']['text'].split('\n'):
- self._queue.put(line+'\n')
- last_id = i['id']
- if (len(page['items']) == 0 or
- len(page['items']) >= page['items_available']):
- break
- self._queue.put(self.EOF)
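+ # Page through the job's stderr log events 1000 at a time, using the last
+ # seen id as a cursor.  Enqueue the EOF sentinel in a finally block so the
+ # consuming iterator terminates even if a request fails.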
+ filters = [
+ ['object_uuid', '=', self.job_uuid],
+ ['event_type', '=', 'stderr']]
+ try:
+ while True:
+ page = arvados.api().logs().index(
+ limit=1000,
+ order=['id asc'],
+ filters=filters + [['id','>',str(last_id)]],
+ select=['id', 'properties'],
+ ).execute(num_retries=2)
+ got += len(page['items'])
+ logger.debug(
+ '%s: received %d of %d log events',
+ self.job_uuid, got,
+ got + page['items_available'] - len(page['items']))
+ for i in page['items']:
+ for line in i['properties']['text'].split('\n'):
+ self._queue.put(line+'\n')
+ last_id = i['id']
+ if (len(page['items']) == 0 or
+ len(page['items']) >= page['items_available']):
+ break
+ finally:
+ self._queue.put(self.EOF)
def __iter__(self):
self._queue = Queue.Queue()
def next(self):
line = self._queue.get()
if line is self.EOF:
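+ # The producer thread has already enqueued EOF, so it is done; wait for it
+ # to exit before stopping iteration.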
+ self._thread.join()
raise StopIteration
return line
import math
import re
import sys
+import threading
from arvados.api import OrderedJsonModel
from crunchstat_summary import logger
# stats_max: {category: {stat: val}}
self.stats_max = collections.defaultdict(
- functools.partial(collections.defaultdict,
- lambda: float('-Inf')))
+ functools.partial(collections.defaultdict, lambda: 0))
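+ # The 0 default (rather than -Inf) lets stats that never appear in the logs
+ # be reported as 0 instead of being skipped.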
# task_stats: {task_id: {category: {stat: val}}}
self.task_stats = collections.defaultdict(
functools.partial(collections.defaultdict, dict))
# constructor will overwrite this with something useful.
self.existing_constraints = {}
- logger.debug("%s: logdata %s", self.label, repr(logdata))
+ logger.debug("%s: logdata %s", self.label, logdata)
def run(self):
- logger.debug("%s: parsing log data", self.label)
+ logger.debug("%s: parsing logdata %s", self.label, self._logdata)
for line in self._logdata:
m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) job_task (?P<task_uuid>\S+)$', line)
if m:
child_summarizer.stats_max = self.stats_max
child_summarizer.task_stats = self.task_stats
child_summarizer.tasks = self.tasks
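+ # Share the parent job's start time so the child's series use the same
+ # time origin.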
+ child_summarizer.starttime = self.starttime
child_summarizer.run()
logger.debug('%s: done %s', self.label, uuid)
continue
val = val / this_interval_s
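+ # Record series points relative to the job start time (not the task start
+ # time) so all tasks share one time axis in the charts.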
if stat in ['user+sys__rate', 'tx+rx__rate']:
task.series[category, stat].append(
- (timestamp - task.starttime, val))
+ (timestamp - self.starttime, val))
else:
if stat in ['rss']:
task.series[category, stat].append(
- (timestamp - task.starttime, val))
+ (timestamp - self.starttime, val))
self.task_stats[task_id][category][stat] = val
if val > self.stats_max[category][stat]:
self.stats_max[category][stat] = val
self.stats_max['mem']['rss'],
lambda x: x / 1e9),
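+ # Network figures below count keep0 traffic as well as eth0.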
('Max network traffic in a single task: {}GB',
- self.stats_max['net:eth0']['tx+rx'],
+ self.stats_max['net:eth0']['tx+rx'] +
+ self.stats_max['net:keep0']['tx+rx'],
lambda x: x / 1e9),
('Max network speed in a single interval: {}MB/s',
- self.stats_max['net:eth0']['tx+rx__rate'],
+ self.stats_max['net:eth0']['tx+rx__rate'] +
+ self.stats_max['net:keep0']['tx+rx__rate'],
lambda x: x / 1e6)):
format_string, val, transform = args
if val == float('-Inf'):
if cpu_max_rate == float('-Inf'):
logger.warning('%s: no CPU usage data', self.label)
return
- used_cores = int(math.ceil(cpu_max_rate))
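+ # Recommend at least one core even when measured CPU usage rounds to zero.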
+ used_cores = max(1, int(math.ceil(cpu_max_rate)))
asked_cores = self.existing_constraints.get('min_cores_per_node')
if asked_cores is None or used_cores < asked_cores:
yield (
self.job = arv.jobs().get(uuid=job).execute()
else:
self.job = job
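+ # Prefer the job's log collection; if it cannot be read, fall back to
+ # fetching live event logs below.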
+ rdr = None
if self.job['log']:
- rdr = crunchstat_summary.reader.CollectionReader(self.job['log'])
- label = self.job['uuid']
- else:
+ try:
+ rdr = crunchstat_summary.reader.CollectionReader(self.job['log'])
+ except arvados.errors.NotFoundError as e:
+ logger.warning("Trying event logs after failing to read "
+ "log collection %s: %s", self.job['log'], e)
+ else:
+ label = self.job['uuid']
+ if rdr is None:
rdr = crunchstat_summary.reader.LiveLogReader(self.job['uuid'])
label = self.job['uuid'] + ' (partial)'
super(JobSummarizer, self).__init__(rdr, **kwargs)
self.label = pipeline_instance_uuid
def run(self):
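+ # Run each job's summarizer in its own thread so logs for multiple jobs
+ # are fetched and parsed in parallel.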
+ threads = []
for summarizer in self.summarizers.itervalues():
- summarizer.run()
+ t = threading.Thread(target=summarizer.run)
+ t.daemon = True
+ t.start()
+ threads.append(t)
+ for t in threads:
+ t.join()
def text_report(self):
txt = ''
category metric task_max task_max_rate job_total
-blkio:0:0 read 0 0.00 0
-blkio:0:0 write 0 0.00 0
+blkio:0:0 read 0 0 0
+blkio:0:0 write 0 0 0
cpu cpus 8 - -
cpu sys 1.92 0.04 1.92
cpu user 3.83 0.09 3.83
cpu user+sys 5.75 0.13 5.75
-fuseops read 0 0.00 0
-fuseops write 0 0.00 0
-keepcache hit 0 0.00 0
-keepcache miss 0 0.00 0
-keepcalls get 0 0.00 0
-keepcalls put 0 0.00 0
+fuseops read 0 0 0
+fuseops write 0 0 0
+keepcache hit 0 0 0
+keepcache miss 0 0 0
+keepcalls get 0 0 0
+keepcalls put 0 0 0
mem cache 1678139392 - -
mem pgmajfault 0 - 0
mem rss 349814784 - -
net:eth0 rx 1754364530 41658344.87 1754364530
net:eth0 tx 38837956 920817.97 38837956
net:eth0 tx+rx 1793202486 42579162.83 1793202486
-net:keep0 rx 0 0.00 0
-net:keep0 tx 0 0.00 0
-net:keep0 tx+rx 0 0.00 0
+net:keep0 rx 0 0 0
+net:keep0 tx 0 0 0
+net:keep0 tx+rx 0 0 0
time elapsed 80 - 80
# Number of tasks: 1
# Max CPU time spent by a single task: 5.75s
category metric task_max task_max_rate job_total
cpu cpus 8 - -
-cpu sys 0.00 - 0.00
-cpu user 0.00 - 0.00
-cpu user+sys 0.00 - 0.00
+cpu sys 0 - 0.00
+cpu user 0 - 0.00
+cpu user+sys 0 - 0.00
mem cache 12288 - -
mem pgmajfault 0 - 0
mem rss 856064 - -
net:eth0 tx+rx 180 - 180
time elapsed 2 - 4
# Number of tasks: 2
-# Max CPU time spent by a single task: 0.00s
+# Max CPU time spent by a single task: 0s
+# Max CPU usage in a single interval: 0%
# Overall CPU usage: 0.00%
# Max memory used by a single task: 0.00GB
# Max network traffic in a single task: 0.00GB
+# Max network speed in a single interval: 0.00MB/s
+#!! 4xphq-8i9sb-zvb2ocfycpomrup max CPU usage was 0% -- try runtime_constraints "min_cores_per_node":1
#!! 4xphq-8i9sb-zvb2ocfycpomrup max RSS was 1 MiB -- try runtime_constraints "min_ram_mb_per_node":972
category metric task_max task_max_rate job_total
cpu cpus 8 - -
-cpu sys 0.00 - 0.00
-cpu user 0.00 - 0.00
-cpu user+sys 0.00 - 0.00
+cpu sys 0 - 0.00
+cpu user 0 - 0.00
+cpu user+sys 0 - 0.00
mem cache 8192 - -
mem pgmajfault 0 - 0
mem rss 450560 - -
net:eth0 tx+rx 180 - 180
time elapsed 2 - 3
# Number of tasks: 2
-# Max CPU time spent by a single task: 0.00s
+# Max CPU time spent by a single task: 0s
+# Max CPU usage in a single interval: 0%
# Overall CPU usage: 0.00%
# Max memory used by a single task: 0.00GB
# Max network traffic in a single task: 0.00GB
+# Max network speed in a single interval: 0.00MB/s
+#!! 4xphq-8i9sb-v831jm2uq0g2g9x max CPU usage was 0% -- try runtime_constraints "min_cores_per_node":1
#!! 4xphq-8i9sb-v831jm2uq0g2g9x max RSS was 1 MiB -- try runtime_constraints "min_ram_mb_per_node":972