+ @staticmethod
+ def one_task_per_input_stream(if_sequence=0, and_end_task=True):
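+ """Queue one new job task for each input stream of the current job's
+ input collection.
+
+ Does nothing unless if_sequence equals the current task's sequence
+ number. Each new task gets sequence if_sequence + 1 and the stream's
+ manifest tokens as its 'input' parameter. If and_end_task is true,
+ the current task is then marked successful and the process exits.
+ """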
+ if if_sequence != current_task()['sequence']:
+ return
+ job_input = current_job()['script_parameters']['input']
+ cr = CollectionReader(job_input)
+ for s in cr.all_streams():
+ task_input = s.tokens()
+ new_task_attrs = {
+ 'job_uuid': current_job()['uuid'],
+ 'created_by_job_task_uuid': current_task()['uuid'],
+ 'sequence': if_sequence + 1,
+ 'parameters': {
+ 'input':task_input
+ }
+ }
+ service.job_tasks().create(job_task=json.dumps(new_task_attrs)).execute()
+ if and_end_task:
+ service.job_tasks().update(uuid=current_task()['uuid'],
+ job_task=json.dumps({'success':True})
+ ).execute()
+ exit(0)
+
+class util:
+ @staticmethod
+ def clear_tmpdir(path=None):
+ """
+ Ensure the given directory (or TASK_TMPDIR if none given)
+ exists and is empty.
+ """
+ if path is None:
+ path = current_task().tmpdir
+ if os.path.exists(path):
+ p = subprocess.Popen(['rm', '-rf', path], stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate(None)
+ if p.returncode != 0:
+ raise Exception('rm -rf %s: %s' % (path, stderr))
+ os.mkdir(path)
+
+ @staticmethod
+ def run_command(execargs, **kwargs):
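+ """Run an external command and return (stdoutdata, stderrdata).
+
+ By default stdin and stdout are pipes and stderr defaults to
+ sys.stderr; any extra keyword arguments are passed through to
+ subprocess.Popen. Raises Exception if the command exits non-zero.
+ Hypothetical example:
+ out, err = util.run_command(['ls', '-l'], cwd='/tmp')
+ """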
+ kwargs.setdefault('stdin', subprocess.PIPE)
+ kwargs.setdefault('stdout', subprocess.PIPE)
+ kwargs.setdefault('stderr', sys.stderr)
+ kwargs.setdefault('close_fds', True)
+ kwargs.setdefault('shell', False)
+ p = subprocess.Popen(execargs, **kwargs)
+ stdoutdata, stderrdata = p.communicate(None)
+ if p.returncode != 0:
+ raise Exception("run_command %s exit %d:\n%s" %
+ (execargs, p.returncode, stderrdata))
+ return stdoutdata, stderrdata
+
+ @staticmethod
+ def git_checkout(url, version, path):
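+ """Clone the git repository at url into path (relative paths are
+ taken relative to the job's tmpdir) unless it is already present,
+ then check out version. Return the checkout's path.
+ """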
+ if not re.search('^/', path):
+ path = os.path.join(current_job().tmpdir, path)
+ if not os.path.exists(path):
+ util.run_command(["git", "clone", url, path],
+ cwd=os.path.dirname(path))
+ util.run_command(["git", "checkout", version],
+ cwd=path)
+ return path
+
+ @staticmethod
+ def tar_extractor(path, decompress_flag):
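+ """Return a tar subprocess that extracts its stdin into path.
+ decompress_flag is 'j' (bzip2), 'z' (gzip), or '' (uncompressed tar).
+ """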
+ return subprocess.Popen(["tar",
+ "-C", path,
+ ("-x%sf" % decompress_flag),
+ "-"],
+ stdout=None,
+ stdin=subprocess.PIPE, stderr=sys.stderr,
+ shell=False, close_fds=True)
+
+ @staticmethod
+ def tarball_extract(tarball, path):
+ """Retrieve a tarball from Keep and extract it to a local
+ directory. Return the absolute path where the tarball was
+ extracted. If the top level of the tarball contained just one
+ file or directory, return the absolute path of that single
+ item.
+
+ tarball -- collection locator
+ path -- where to extract the tarball: absolute, or relative to job tmp
+ """
+ if not re.search('^/', path):
+ path = os.path.join(current_job().tmpdir, path)
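+ # Hold an exclusive lock on <path>.lock so concurrent processes
+ # extracting into the same directory do not interfere.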
+ lockfile = open(path + '.lock', 'w')
+ fcntl.flock(lockfile, fcntl.LOCK_EX)
+ try:
+ os.stat(path)
+ except OSError:
+ os.mkdir(path)
+ already_have_it = False
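+ # The '.locator' symlink records which collection was last extracted
+ # here; if it already matches, the extraction can be skipped.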
+ try:
+ if os.readlink(os.path.join(path, '.locator')) == tarball:
+ already_have_it = True
+ except OSError:
+ pass
+ if not already_have_it:
+
+ # emulate "rm -f" (i.e., if the file does not exist, we win)
+ try:
+ os.unlink(os.path.join(path, '.locator'))
+ except OSError:
+ if os.path.exists(os.path.join(path, '.locator')):
+ os.unlink(os.path.join(path, '.locator'))
+
+ for f in CollectionReader(tarball).all_files():
+ if re.search('\.(tbz|tar.bz2)$', f.name()):
+ p = util.tar_extractor(path, 'j')
+ elif re.search('\.(tgz|tar.gz)$', f.name()):
+ p = util.tar_extractor(path, 'z')
+ elif re.search('\.tar$', f.name()):
+ p = util.tar_extractor(path, '')
+ else:
+ raise Exception("tarball_extract cannot handle filename %s"
+ % f.name())
+ while True:
+ buf = f.read(2**20)
+ if len(buf) == 0:
+ break
+ p.stdin.write(buf)
+ p.stdin.close()
+ p.wait()
+ if p.returncode != 0:
+ lockfile.close()
+ raise Exception("tar exited %d" % p.returncode)
+ os.symlink(tarball, os.path.join(path, '.locator'))
+ tld_extracts = filter(lambda f: f != '.locator', os.listdir(path))
+ lockfile.close()
+ if len(tld_extracts) == 1:
+ return os.path.join(path, tld_extracts[0])
+ return path
+
+ @staticmethod
+ def zipball_extract(zipball, path):
+ """Retrieve a zip archive from Keep and extract it to a local
+ directory. Return the absolute path where the archive was
+ extracted. If the top level of the archive contained just one
+ file or directory, return the absolute path of that single
+ item.
+
+ zipball -- collection locator
+ path -- where to extract the archive: absolute, or relative to job tmp
+ """
+ if not re.search('^/', path):
+ path = os.path.join(current_job().tmpdir, path)
+ lockfile = open(path + '.lock', 'w')
+ fcntl.flock(lockfile, fcntl.LOCK_EX)
+ try:
+ os.stat(path)
+ except OSError:
+ os.mkdir(path)
+ already_have_it = False
+ try:
+ if os.readlink(os.path.join(path, '.locator')) == zipball:
+ already_have_it = True
+ except OSError:
+ pass
+ if not already_have_it:
+
+ # emulate "rm -f" (i.e., if the file does not exist, we win)
+ try:
+ os.unlink(os.path.join(path, '.locator'))
+ except OSError:
+ if os.path.exists(os.path.join(path, '.locator')):
+ os.unlink(os.path.join(path, '.locator'))
+
+ for f in CollectionReader(zipball).all_files():
+ if not re.search('\.zip$', f.name()):
+ raise Exception("zipball_extract cannot handle filename %s"
+ % f.name())
+ zip_filename = os.path.join(path, os.path.basename(f.name()))
+ zip_file = open(zip_filename, 'wb')
+ while True:
+ buf = f.read(2**20)
+ if len(buf) == 0:
+ break
+ zip_file.write(buf)
+ zip_file.close()
+
+ p = subprocess.Popen(["unzip",
+ "-q", "-o",
+ "-d", path,
+ zip_filename],
+ stdout=None,
+ stdin=None, stderr=sys.stderr,
+ shell=False, close_fds=True)
+ p.wait()
+ if p.returncode != 0:
+ lockfile.close()
+ raise Exception("unzip exited %d" % p.returncode)
+ os.unlink(zip_filename)
+ os.symlink(zipball, os.path.join(path, '.locator'))
+ tld_extracts = filter(lambda f: f != '.locator', os.listdir(path))
+ lockfile.close()
+ if len(tld_extracts) == 1:
+ return os.path.join(path, tld_extracts[0])
+ return path
+
+ @staticmethod
+ def collection_extract(collection, path, files=[], decompress=True):
+ """Retrieve a collection from Keep and extract it to a local
+ directory. Return the absolute path where the collection was
+ extracted.
+
+ collection -- collection locator
+ path -- where to extract: absolute, or relative to job tmp
+ """
+ matches = re.search(r'^([0-9a-f]+)(\+[\w@]+)*$', collection)
+ if matches:
+ collection_hash = matches.group(1)
+ else:
+ collection_hash = hashlib.md5(collection).hexdigest()
+ if not re.search('^/', path):
+ path = os.path.join(current_job().tmpdir, path)
+ lockfile = open(path + '.lock', 'w')
+ fcntl.flock(lockfile, fcntl.LOCK_EX)
+ try:
+ os.stat(path)
+ except OSError:
+ os.mkdir(path)
+ already_have_it = False
+ try:
+ if os.readlink(os.path.join(path, '.locator')) == collection_hash:
+ already_have_it = True
+ except OSError:
+ pass
+
+ # emulate "rm -f" (i.e., if the file does not exist, we win)
+ try:
+ os.unlink(os.path.join(path, '.locator'))
+ except OSError:
+ if os.path.exists(os.path.join(path, '.locator')):
+ os.unlink(os.path.join(path, '.locator'))
+
+ files_got = []
+ for s in CollectionReader(collection).all_streams():
+ stream_name = s.name()
+ for f in s.all_files():
+ if (files == [] or
+ ((f.name() not in files_got) and
+ (f.name() in files or
+ (decompress and f.decompressed_name() in files)))):
+ outname = f.decompressed_name() if decompress else f.name()
+ files_got += [outname]
+ if os.path.exists(os.path.join(path, stream_name, outname)):
+ continue
+ util.mkdir_dash_p(os.path.dirname(os.path.join(path, stream_name, outname)))
+ outfile = open(os.path.join(path, stream_name, outname), 'wb')
+ for buf in (f.readall_decompressed() if decompress
+ else f.readall()):
+ outfile.write(buf)
+ outfile.close()
+ if len(files_got) < len(files):
+ raise Exception("Wanted files %s but only got %s from %s" % (files, files_got, map(lambda z: z.name(), list(CollectionReader(collection).all_files()))))
+ os.symlink(collection_hash, os.path.join(path, '.locator'))
+
+ lockfile.close()
+ return path
+
+ @staticmethod
+ def mkdir_dash_p(path):
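+ """Create path and any missing parent directories, like 'mkdir -p'."""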
+ if not os.path.exists(path):
+ util.mkdir_dash_p(os.path.dirname(path))
+ try:
+ os.mkdir(path)
+ except OSError:
+ # Tolerate a race with another process creating the directory,
+ # but re-raise if the directory still does not exist.
+ if not os.path.isdir(path):
+ raise
+
+ @staticmethod
+ def stream_extract(stream, path, files=[], decompress=True):
+ """Retrieve a stream from Keep and extract it to a local
+ directory. Return the absolute path where the stream was
+ extracted.
+
+ stream -- StreamReader object
+ path -- where to extract: absolute, or relative to job tmp
+ """
+ if not re.search('^/', path):
+ path = os.path.join(current_job().tmpdir, path)
+ lockfile = open(path + '.lock', 'w')
+ fcntl.flock(lockfile, fcntl.LOCK_EX)
+ try:
+ os.stat(path)
+ except OSError:
+ os.mkdir(path)
+
+ files_got = []
+ for f in stream.all_files():
+ if (files == [] or
+ ((f.name() not in files_got) and
+ (f.name() in files or
+ (decompress and f.decompressed_name() in files)))):
+ outname = f.decompressed_name() if decompress else f.name()
+ files_got += [outname]
+ if os.path.exists(os.path.join(path, outname)):
+ os.unlink(os.path.join(path, outname))
+ util.mkdir_dash_p(os.path.dirname(os.path.join(path, outname)))
+ outfile = open(os.path.join(path, outname), 'wb')
+ for buf in (f.readall_decompressed() if decompress
+ else f.readall()):
+ outfile.write(buf)
+ outfile.close()
+ if len(files_got) < len(files):
+ raise Exception("Wanted files %s but only got %s from %s" %
+ (files, files_got, map(lambda z: z.name(),
+ list(stream.all_files()))))
+ lockfile.close()
+ return path
+
+ @staticmethod
+ def listdir_recursive(dirname, base=None):
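+ """Return a sorted, recursive listing of the files under dirname.
+ Returned paths are relative to dirname (prefixed with base, if given).
+ """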
+ allfiles = []
+ for ent in sorted(os.listdir(dirname)):
+ ent_path = os.path.join(dirname, ent)
+ ent_base = os.path.join(base, ent) if base else ent
+ if os.path.isdir(ent_path):
+ allfiles += util.listdir_recursive(ent_path, ent_base)
+ else:
+ allfiles += [ent_base]
+ return allfiles
+
+class StreamFileReader(object):
+ def __init__(self, stream, pos, size, name):
+ self._stream = stream
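+ # pos is this file's starting offset within the stream; _filepos
+ # tracks the current read position within the file itself.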
+ self._pos = pos
+ self._size = size
+ self._name = name
+ self._filepos = 0
+ def name(self):
+ return self._name
+ def decompressed_name(self):
+ return re.sub('\.(bz2|gz)$', '', self._name)
+ def size(self):
+ return self._size
+ def stream_name(self):
+ return self._stream.name()
+ def read(self, size, **kwargs):
+ self._stream.seek(self._pos + self._filepos)
+ data = self._stream.read(min(size, self._size - self._filepos))
+ self._filepos += len(data)
+ return data
+ def readall(self, size=2**20, **kwargs):
+ while True:
+ data = self.read(size, **kwargs)
+ if data == '':
+ break
+ yield data
+ def bunzip2(self, size):
+ decompressor = bz2.BZ2Decompressor()
+ for chunk in self.readall(size):
+ data = decompressor.decompress(chunk)
+ if data and data != '':
+ yield data
+ def gunzip(self, size):
+ decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
+ for chunk in self.readall(size):
+ data = decompressor.decompress(decompressor.unconsumed_tail + chunk)
+ if data and data != '':
+ yield data
+ def readall_decompressed(self, size=2**20):
+ self._stream.seek(self._pos + self._filepos)
+ if re.search('\.bz2$', self._name):
+ return self.bunzip2(size)
+ elif re.search('\.gz$', self._name):
+ return self.gunzip(size)
+ else:
+ return self.readall(size)
+ def readlines(self, decompress=True):
+ if decompress:
+ datasource = self.readall_decompressed()
+ else:
+ self._stream.seek(self._pos + self._filepos)
+ datasource = self.readall()
+ data = ''
+ for newdata in datasource:
+ data += newdata
+ sol = 0
+ while True:
+ eol = string.find(data, "\n", sol)
+ if eol < 0:
+ break
+ yield data[sol:eol+1]
+ sol = eol+1
+ data = data[sol:]
+ if data != '':
+ yield data
+ def as_manifest(self):
+ if self.size() == 0:
+ return ("%s d41d8cd98f00b204e9800998ecf8427e+0 0:0:%s\n"
+ % (self._stream.name(), self.name()))
+ return string.join(self._stream.tokens_for_range(self._pos, self._size),
+ " ") + "\n"
+
+class StreamReader(object):
+ def __init__(self, tokens):
+ self._tokens = tokens
+ self._current_datablock_data = None
+ self._current_datablock_pos = 0
+ self._current_datablock_index = -1
+ self._pos = 0
+
+ self._stream_name = None
+ self.data_locators = []
+ self.files = []
+
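+ # A stream's manifest tokens are: the stream name, then data block
+ # locators (32 hex digits plus optional "+hint" suffixes), then file
+ # tokens of the form "position:size:filename".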
+ for tok in self._tokens:
+ if self._stream_name is None:
+ self._stream_name = tok
+ elif re.search(r'^[0-9a-f]{32}(\+\S+)*$', tok):
+ self.data_locators += [tok]
+ elif re.search(r'^\d+:\d+:\S+', tok):
+ pos, size, name = tok.split(':',2)
+ self.files += [[int(pos), int(size), name]]
+ else:
+ raise Exception("Invalid manifest format")
+
+ def tokens(self):
+ return self._tokens
+ def tokens_for_range(self, range_start, range_size):
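+ """Return manifest tokens describing just the part of this stream
+ that overlaps the given byte range: the stream name, the data block
+ locators that overlap the range, and position-adjusted file tokens
+ for the files that overlap it.
+ """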
+ resp = [self._stream_name]
+ return_all_tokens = False
+ block_start = 0
+ token_bytes_skipped = 0
+ for locator in self.data_locators:
+ sizehint = re.search(r'\+(\d+)', locator)
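+ # A locator without a "+size" hint makes block offsets incomputable,
+ # so fall back to returning every remaining locator.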
+ if not sizehint:
+ return_all_tokens = True
+ if return_all_tokens:
+ resp += [locator]
+ continue
+ blocksize = int(sizehint.group(0))
+ if range_start + range_size <= block_start:
+ break
+ if range_start < block_start + blocksize:
+ resp += [locator]
+ else:
+ token_bytes_skipped += blocksize
+ block_start += blocksize
+ for f in self.files:
+ if ((f[0] < range_start + range_size)
+ and
+ (f[0] + f[1] > range_start)
+ and
+ f[1] > 0):
+ resp += ["%d:%d:%s" % (f[0] - token_bytes_skipped, f[1], f[2])]
+ return resp
+ def name(self):
+ return self._stream_name
+ def all_files(self):
+ for f in self.files:
+ pos, size, name = f
+ yield StreamFileReader(self, pos, size, name)
+ def nextdatablock(self):
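+ """Advance to the next data block, updating the block's starting
+ position and discarding any cached block data.
+ """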
+ if self._current_datablock_index < 0:
+ self._current_datablock_pos = 0
+ self._current_datablock_index = 0
+ else:
+ self._current_datablock_pos += self.current_datablock_size()
+ self._current_datablock_index += 1
+ self._current_datablock_data = None
+ def current_datablock_data(self):
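+ """Fetch the current data block from Keep, caching it for reuse."""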
+ if self._current_datablock_data is None:
+ self._current_datablock_data = Keep.get(self.data_locators[self._current_datablock_index])
+ return self._current_datablock_data
+ def current_datablock_size(self):
+ if self._current_datablock_index < 0:
+ self.nextdatablock()
+ sizehint = re.search('\+(\d+)', self.data_locators[self._current_datablock_index])
+ if sizehint:
+ return int(sizehint.group(0))
+ return len(self.current_datablock_data())
+ def seek(self, pos):
+ """Set the position of the next read operation."""
+ self._pos = pos
+ def really_seek(self):
+ """Find and load the appropriate data block, so the byte at
+ _pos is in memory.
+ """
+ if self._pos == self._current_datablock_pos:
+ return True
+ if (self._current_datablock_pos is not None and
+ self._pos >= self._current_datablock_pos and
+ self._pos <= self._current_datablock_pos + self.current_datablock_size()):
+ return True
+ if self._pos < self._current_datablock_pos:
+ self._current_datablock_index = -1
+ self.nextdatablock()
+ while (self._pos > self._current_datablock_pos and
+ self._pos > self._current_datablock_pos + self.current_datablock_size()):
+ self.nextdatablock()
+ def read(self, size):
+ """Read no more than size bytes -- but at least one byte,
+ unless _pos is already at the end of the stream.
+ """
+ if size == 0:
+ return ''
+ self.really_seek()
+ while self._pos >= self._current_datablock_pos + self.current_datablock_size():
+ self.nextdatablock()
+ if self._current_datablock_index >= len(self.data_locators):
+ return None
+ data = self.current_datablock_data()[self._pos - self._current_datablock_pos : self._pos - self._current_datablock_pos + size]
+ self._pos += len(data)
+ return data
+
+class CollectionReader(object):
+ def __init__(self, manifest_locator_or_text):
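+ # Accept either literal manifest text or a collection locator;
+ # manifest text is recognized by its "stream-name locator
+ # position:size:filename" line structure.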
+ if re.search(r'^\S+( [a-f0-9]{32,}(\+\S+)*)+( \d+:\d+:\S+)+\n', manifest_locator_or_text):
+ self._manifest_text = manifest_locator_or_text
+ self._manifest_locator = None
+ else:
+ self._manifest_locator = manifest_locator_or_text
+ self._manifest_text = None
+ self._streams = None