-import gflags
-import httplib
-import httplib2
-import logging
+import collections
+import hashlib
import os
-import pprint
-import sys
-import types
-import subprocess
-import json
-import UserDict
import re
-import hashlib
-import string
-import bz2
-import zlib
-import fcntl
-import time
import threading
+import functools
+import copy
-class StreamFileReader(object):
- def __init__(self, stream, pos, size, name):
- self._stream = stream
- self._pos = pos
- self._size = size
- self._name = name
- self._filepos = 0
+from .ranges import *
+from .arvfile import ArvadosFileBase, StreamFileReader
+from arvados.retry import retry_method
+from keep import *
+import config
+import errors
- def name(self):
- return self._name
+def locator_block_size(loc):
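+    # Extract the size hint from a Keep block locator of the form
+    # "<md5 hexdigest>+<size>[+<hints>]", e.g. (hypothetical locator)
+    # locator_block_size("d41d8cd98f00b204e9800998ecf8427e+1234") -> 1234L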
+ s = re.match(r'[0-9a-f]{32}\+(\d+)(\+\S+)*', loc)
+ return long(s.group(1))
- def decompressed_name(self):
- return re.sub('\.(bz2|gz)$', '', self._name)
+def normalize_stream(s, stream):
+    '''Return the stream as a list of manifest tokens.
+
+    s is the stream name.
+    stream is a dict mapping each filename to a list of LocatorAndRange
+    objects: (block locator, block size, segment offset from the start of
+    the block, segment size).
+    '''
+ stream_tokens = [s]
+    sortedfiles = sorted(stream.keys())
- def size(self):
- return self._size
-
- def stream_name(self):
- return self._stream.name()
-
- def read(self, size, **kwargs):
- self._stream.seek(self._pos + self._filepos)
- data = self._stream.read(min(size, self._size - self._filepos))
- self._filepos += len(data)
- return data
-
- def readall(self, size=2**20, **kwargs):
- while True:
- data = self.read(size, **kwargs)
- if data == '':
- break
- yield data
-
- def bunzip2(self, size):
- decompressor = bz2.BZ2Decompressor()
- for chunk in self.readall(size):
- data = decompressor.decompress(chunk)
- if data and data != '':
- yield data
-
- def gunzip(self, size):
- decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
- for chunk in self.readall(size):
- data = decompressor.decompress(decompressor.unconsumed_tail + chunk)
- if data and data != '':
- yield data
-
- def readall_decompressed(self, size=2**20):
- self._stream.seek(self._pos + self._filepos)
- if re.search('\.bz2$', self._name):
- return self.bunzip2(size)
- elif re.search('\.gz$', self._name):
- return self.gunzip(size)
- else:
- return self.readall(size)
+ blocks = {}
+ streamoffset = 0L
+ # Go through each file and add each referenced block exactly once.
+ for f in sortedfiles:
+ for b in stream[f]:
+ if b.locator not in blocks:
+ stream_tokens.append(b.locator)
+ blocks[b.locator] = streamoffset
+ streamoffset += locator_block_size(b.locator)
- def readlines(self, decompress=True):
- if decompress:
- datasource = self.readall_decompressed()
- else:
- self._stream.seek(self._pos + self._filepos)
- datasource = self.readall()
- data = ''
- for newdata in datasource:
- data += newdata
- sol = 0
- while True:
- eol = string.find(data, "\n", sol)
- if eol < 0:
- break
- yield data[sol:eol+1]
- sol = eol+1
- data = data[sol:]
- if data != '':
- yield data
-
- def as_manifest(self):
- if self.size() == 0:
- return ("%s %s 0:0:%s\n"
- % (self._stream.name(), EMPTY_BLOCK_LOCATOR, self.name()))
- return string.join(self._stream.tokens_for_range(self._pos, self._size),
- " ") + "\n"
+ # Add the empty block if the stream is otherwise empty.
+ if len(stream_tokens) == 1:
+ stream_tokens.append(config.EMPTY_BLOCK_LOCATOR)
-class StreamReader(object):
- def __init__(self, tokens):
- self._tokens = tokens
- self._current_datablock_data = None
- self._current_datablock_pos = 0
- self._current_datablock_index = -1
- self._pos = 0
+ for f in sortedfiles:
+ # Add in file segments
+ current_span = None
+ fout = f.replace(' ', '\\040')
+ for segment in stream[f]:
+ # Collapse adjacent segments
+ streamoffset = blocks[segment.locator] + segment.segment_offset
+            if current_span is None:
+                current_span = [streamoffset, streamoffset + segment.segment_size]
+            elif streamoffset == current_span[1]:
+                current_span[1] += segment.segment_size
+            else:
+                stream_tokens.append("{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
+                current_span = [streamoffset, streamoffset + segment.segment_size]
+
+ if current_span is not None:
+ stream_tokens.append("{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
+
+ if not stream[f]:
+ stream_tokens.append("0:0:{0}".format(fout))
+
+ return stream_tokens
+
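+# A minimal sketch of normalize_stream, assuming LocatorAndRange from .ranges
+# takes (locator, block_size, segment_offset, segment_size); the locator and
+# sizes are purely illustrative:
+#
+#   segs = [LocatorAndRange("d41d8cd98f00b204e9800998ecf8427e+9", 9, 0, 9)]
+#   normalize_stream(".", {"file.txt": segs})
+#   # -> [".", "d41d8cd98f00b204e9800998ecf8427e+9", "0:9:file.txt"]
+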
+class StreamReader(object):
+ def __init__(self, tokens, keep=None, debug=False, _empty=False,
+ num_retries=0):
self._stream_name = None
- self.data_locators = []
- self.files = []
+ self._data_locators = []
+ self._files = collections.OrderedDict()
+ self._keep = keep
+ self.num_retries = num_retries
+
+ streamoffset = 0L
- for tok in self._tokens:
- if self._stream_name == None:
+ # parse stream
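+        # For example, the tokens of the (hypothetical) manifest line
+        #   ". 0cc175b9c0f1b6a831c399e269772661+3 0:3:a.txt"
+        # name the stream ".", declare one 3-byte data block, and map the
+        # file "a.txt" onto bytes 0-2 of the stream.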
+ for tok in tokens:
+            if debug:
+                print 'tok', tok
+ if self._stream_name is None:
self._stream_name = tok.replace('\\040', ' ')
- elif re.search(r'^[0-9a-f]{32}(\+\S+)*$', tok):
- self.data_locators += [tok]
- elif re.search(r'^\d+:\d+:\S+', tok):
- pos, size, name = tok.split(':',2)
- self.files += [[int(pos), int(size), name.replace('\\040', ' ')]]
- else:
- raise errors.SyntaxError("Invalid manifest format")
-
- def tokens(self):
- return self._tokens
-
- def tokens_for_range(self, range_start, range_size):
- resp = [self._stream_name]
- return_all_tokens = False
- block_start = 0
- token_bytes_skipped = 0
- for locator in self.data_locators:
- sizehint = re.search(r'\+(\d+)', locator)
- if not sizehint:
- return_all_tokens = True
- if return_all_tokens:
- resp += [locator]
- next
- blocksize = int(sizehint.group(0))
- if range_start + range_size <= block_start:
- break
- if range_start < block_start + blocksize:
- resp += [locator]
- else:
- token_bytes_skipped += blocksize
- block_start += blocksize
- for f in self.files:
- if ((f[0] < range_start + range_size)
- and
- (f[0] + f[1] > range_start)
- and
- f[1] > 0):
- resp += ["%d:%d:%s" % (f[0] - token_bytes_skipped, f[1], f[2])]
- return resp
+ continue
+
+ s = re.match(r'^[0-9a-f]{32}\+(\d+)(\+\S+)*$', tok)
+ if s:
+ blocksize = long(s.group(1))
+ self._data_locators.append(Range(tok, streamoffset, blocksize))
+ streamoffset += blocksize
+ continue
+
+ s = re.search(r'^(\d+):(\d+):(\S+)', tok)
+ if s:
+ pos = long(s.group(1))
+ size = long(s.group(2))
+ name = s.group(3).replace('\\040', ' ')
+ if name not in self._files:
+ self._files[name] = StreamFileReader(self, [Range(pos, 0, size)], name)
+ else:
+ filereader = self._files[name]
+ filereader.segments.append(Range(pos, filereader.size(), size))
+ continue
+
+ raise errors.SyntaxError("Invalid manifest format")
def name(self):
return self._stream_name
+ def files(self):
+ return self._files
+
def all_files(self):
- for f in self.files:
- pos, size, name = f
- yield StreamFileReader(self, pos, size, name)
-
- def nextdatablock(self):
- if self._current_datablock_index < 0:
- self._current_datablock_pos = 0
- self._current_datablock_index = 0
- else:
- self._current_datablock_pos += self.current_datablock_size()
- self._current_datablock_index += 1
- self._current_datablock_data = None
-
- def current_datablock_data(self):
- if self._current_datablock_data == None:
- self._current_datablock_data = Keep.get(self.data_locators[self._current_datablock_index])
- return self._current_datablock_data
-
- def current_datablock_size(self):
- if self._current_datablock_index < 0:
- self.nextdatablock()
- sizehint = re.search('\+(\d+)', self.data_locators[self._current_datablock_index])
- if sizehint:
- return int(sizehint.group(0))
- return len(self.current_datablock_data())
-
- def seek(self, pos):
- """Set the position of the next read operation."""
- self._pos = pos
-
- def really_seek(self):
- """Find and load the appropriate data block, so the byte at
- _pos is in memory.
- """
- if self._pos == self._current_datablock_pos:
- return True
- if (self._current_datablock_pos != None and
- self._pos >= self._current_datablock_pos and
- self._pos <= self._current_datablock_pos + self.current_datablock_size()):
- return True
- if self._pos < self._current_datablock_pos:
- self._current_datablock_index = -1
- self.nextdatablock()
- while (self._pos > self._current_datablock_pos and
- self._pos > self._current_datablock_pos + self.current_datablock_size()):
- self.nextdatablock()
-
- def read(self, size):
- """Read no more than size bytes -- but at least one byte,
- unless _pos is already at the end of the stream.
- """
+ return self._files.values()
+
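+    # The underscore variants (_size, _keepget, _readfrom) hold the real logic
+    # so that a subclass can wrap the public methods (the drafted StreamWriter
+    # below serializes them with a mutex) and still reach the unwrapped code.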
+ def _size(self):
+ n = self._data_locators[-1]
+ return n.range_start + n.range_size
+
+ def size(self):
+ return self._size()
+
+ def locators_and_ranges(self, range_start, range_size):
+ return locators_and_ranges(self._data_locators, range_start, range_size)
+
+ @retry_method
+ def _keepget(self, locator, num_retries=None):
+ return self._keep.get(locator, num_retries=num_retries)
+
+ @retry_method
+ def readfrom(self, start, size, num_retries=None):
+ return self._readfrom(start, size, num_retries=num_retries)
+
+ @retry_method
+ def _readfrom(self, start, size, num_retries=None):
+ """Read up to 'size' bytes from the stream, starting at 'start'"""
if size == 0:
return ''
- self.really_seek()
- while self._pos >= self._current_datablock_pos + self.current_datablock_size():
- self.nextdatablock()
- if self._current_datablock_index >= len(self.data_locators):
- return None
- data = self.current_datablock_data()[self._pos - self._current_datablock_pos : self._pos - self._current_datablock_pos + size]
- self._pos += len(data)
- return data
+ if self._keep is None:
+ self._keep = KeepClient(num_retries=self.num_retries)
+ data = []
+ for lr in locators_and_ranges(self._data_locators, start, size):
+ data.append(self._keepget(lr.locator, num_retries=num_retries)[lr.segment_offset:lr.segment_offset+lr.segment_size])
+ return ''.join(data)
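+
+    # Illustration: for a stream made of two 5-byte blocks, _readfrom(3, 4)
+    # asks locators_and_ranges() for the pieces spanning stream bytes [3, 7)
+    # and joins bytes 3-4 of the first block with bytes 0-1 of the second.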
+
+ def manifest_text(self, strip=False):
+ manifest_text = [self.name().replace(' ', '\\040')]
+ if strip:
+ for d in self._data_locators:
+ m = re.match(r'^[0-9a-f]{32}\+\d+', d.locator)
+ manifest_text.append(m.group(0))
+ else:
+ manifest_text.extend([d.locator for d in self._data_locators])
+ manifest_text.extend([' '.join(["{}:{}:{}".format(seg.locator, seg.range_size, f.name.replace(' ', '\\040'))
+ for seg in f.segments])
+ for f in self._files.values()])
+ return ' '.join(manifest_text) + '\n'
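+
+    # Sketch of a round trip, assuming a Keep client `kc` (hypothetical) and
+    # an illustrative locator:
+    #   tokens = ". 0cc175b9c0f1b6a831c399e269772661+3 0:3:a.txt".split()
+    #   reader = StreamReader(tokens, keep=kc)
+    #   reader.readfrom(0, 3)    # -> the three bytes of a.txt
+    #   reader.manifest_text()   # -> ". 0cc175b9c0f1b6a831c399e269772661+3 0:3:a.txt\n"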
+
+
+# class StreamWriter(StreamReader):
+# def __init__(self, tokens, keep=None, debug=False, _empty=False,
+# num_retries=0):
+# super(StreamWriter, self).__init__(tokens, keep, debug, _empty, num_retries)
+
+# if len(self._files) != 1:
+# raise AssertionError("StreamWriter can only have one file at a time")
+# sr = self._files.popitem()[1]
+# self._files[sr.name] = StreamFileWriter(self, sr.segments, sr.name)
+
+# self.mutex = threading.Lock()
+# self.current_bblock = None
+# self.bufferblocks = {}
+
+# # wrap superclass methods in mutex
+# def _proxy_method(name):
+# method = getattr(StreamReader, name)
+# @functools.wraps(method, ('__name__', '__doc__'))
+# def wrapper(self, *args, **kwargs):
+# with self.mutex:
+# return method(self, *args, **kwargs)
+# return wrapper
+
+# for _method_name in ['files', 'all_files', 'size', 'locators_and_ranges', 'readfrom', 'manifest_text']:
+# locals()[_method_name] = _proxy_method(_method_name)
+
+# @retry_method
+# def _keepget(self, locator, num_retries=None):
+# if locator in self.bufferblocks:
+# bb = self.bufferblocks[locator]
+# return str(bb.buffer_block[0:bb.write_pointer])
+# else:
+# return self._keep.get(locator, num_retries=num_retries)
+
+# def _init_bufferblock(self):
+# last = self._data_locators[-1]
+# streamoffset = last.range_start + last.range_size
+# if last.range_size == 0:
+# del self._data_locators[-1]
+# self.current_bblock = BufferBlock("bufferblock%i" % len(self.bufferblocks), streamoffset)
+# self.bufferblocks[self.current_bblock.locator] = self.current_bblock
+# self._data_locators.append(self.current_bblock.locator_list_entry)
+
+# def _repack_writes(self):
+#         '''Check whether the buffer block holds more data than is referenced
+#         by actual segments (this happens when a buffered write overwrites a
+#         file range written by a previous buffered write).  Re-pack the
+#         buffer block for efficiency and to avoid leaking information.
+#         '''
+# segs = self._files.values()[0].segments
+
+# bufferblock_segs = []
+# i = 0
+# tmp_segs = copy.copy(segs)
+# while i < len(tmp_segs):
+# # Go through each segment and identify segments that include the buffer block
+# s = tmp_segs[i]
+#             bb_start = self.current_bblock.locator_list_entry.range_start
+#             if s.locator < bb_start and (s.locator + s.range_size) > bb_start:
+#                 # The segment straddles the previous block and the current
+#                 # buffer block.  Split the segment.
+#                 b1 = bb_start - s.locator
+#                 b2 = (s.locator + s.range_size) - bb_start
+#                 bb_seg = Range(bb_start, s.range_start + b1, b2)
+#                 tmp_segs[i] = Range(s.locator, s.range_start, b1)
+#                 tmp_segs.insert(i+1, bb_seg)
+#                 bufferblock_segs.append(bb_seg)
+#                 i += 1
+#             elif s.locator >= bb_start:
+# # The segment's data is in the buffer block.
+# bufferblock_segs.append(s)
+# i += 1
+
+# # Now sum up the segments to get the total bytes
+# # of the file referencing into the buffer block.
+# write_total = sum([s.range_size for s in bufferblock_segs])
+
+# if write_total < self.current_bblock.locator_list_entry.range_size:
+# # There is more data in the buffer block than is actually accounted for by segments, so
+# # re-pack into a new buffer by copying over to a new buffer block.
+# new_bb = BufferBlock(self.current_bblock.locator,
+# self.current_bblock.locator_list_entry.range_start,
+# starting_size=write_total)
+# for t in bufferblock_segs:
+#                 t_start = t.locator - self.current_bblock.locator_list_entry.range_start
+#                 t_end = t_start + t.range_size
+#                 t.locator = self.current_bblock.locator_list_entry.range_start + new_bb.write_pointer
+# new_bb.append(self.current_bblock.buffer_block[t_start:t_end])
+
+# self.current_bblock = new_bb
+# self.bufferblocks[self.current_bblock.locator] = self.current_bblock
+# self._data_locators[-1] = self.current_bblock.locator_list_entry
+# self._files.values()[0].segments = tmp_segs
+
+# def _commit(self):
+# # commit buffer block
+
+# # TODO: do 'put' in the background?
+# pdh = self._keep.put(self.current_bblock.buffer_block[0:self.current_bblock.write_pointer])
+#         self._data_locators[-1].locator = pdh
+# self.current_bblock = None
+
+# def commit(self):
+# with self.mutex:
+# self._repack_writes()
+# self._commit()
+
+# def _append(self, data):
+# if len(data) > config.KEEP_BLOCK_SIZE:
+#             raise errors.ArgumentError("Please append data chunks smaller than config.KEEP_BLOCK_SIZE")
+
+# if self.current_bblock is None:
+# self._init_bufferblock()
+
+# if (self.current_bblock.write_pointer + len(data)) > config.KEEP_BLOCK_SIZE:
+# self._repack_writes()
+# if (self.current_bblock.write_pointer + len(data)) > config.KEEP_BLOCK_SIZE:
+# self._commit()
+# self._init_bufferblock()
+
+# self.current_bblock.append(data)
+
+# def append(self, data):
+# with self.mutex:
+# self._append(data)