-import gflags
-import httplib
-import httplib2
-import logging
+import collections
+import hashlib
import os
-import pprint
-import sys
-import types
-import subprocess
-import json
-import UserDict
import re
-import hashlib
-import string
-import bz2
-import zlib
-import fcntl
-import time
import threading
+import functools
+import copy
+from .ranges import *
+from .arvfile import ArvadosFileBase, StreamFileReader
+from arvados.retry import retry_method
from keep import *
import config
import errors
-LOCATOR = 0
-BLOCKSIZE = 1
-OFFSET = 2
-SEGMENTSIZE = 3
-
-def locators_and_ranges(data_locators, range_start, range_size):
- '''returns list of [block locator, blocksize, segment offset, segment size] that satisfies the range'''
- resp = []
- range_start = long(range_start)
- range_size = long(range_size)
- range_end = range_start + range_size
- block_start = 0L
- for locator, block_size, block_start in data_locators:
- block_end = block_start + block_size
- if range_end < block_start:
- # range ends before this block starts, so don't look at any more locators
- break
- if range_start > block_end:
- # range starts after this block ends, so go to next block
- next
- elif range_start >= block_start and range_end <= block_end:
- # range starts and ends in this block
- resp.append([locator, block_size, range_start - block_start, range_size])
- elif range_start >= block_start:
- # range starts in this block
- resp.append([locator, block_size, range_start - block_start, block_end - range_start])
- elif range_start < block_start and range_end > block_end:
- # range starts in a previous block and extends to further blocks
- resp.append([locator, block_size, 0L, block_size])
- elif range_start < block_start and range_end <= block_end:
- # range starts in a previous block and ends in this block
- resp.append([locator, block_size, 0L, range_end - block_start])
- block_start = block_end
- return resp
-
-
-class StreamFileReader(object):
- def __init__(self, stream, segments, name):
- self._stream = stream
- self.segments = segments
- self._name = name
- self._filepos = 0L
-
- def name(self):
- return self._name
-
- def decompressed_name(self):
- return re.sub('\.(bz2|gz)$', '', self._name)
-
- def stream_name(self):
- return self._stream.name()
+def locator_block_size(loc):
+ s = re.match(r'[0-9a-f]{32}\+(\d+)(\+\S+)*', loc)
+ return long(s.group(1))
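+# Illustrative note: a Keep locator is an md5 hash, "+", the block size in
+# bytes, then optional "+"-separated hints, so for example
+# locator_block_size("d41d8cd98f00b204e9800998ecf8427e+0") returns 0L.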
+
+def normalize_stream(s, stream):
+ '''
+ s is the stream name
+ stream is a dict mapping each filename to a list of segment objects, each carrying a block locator, a segment offset (from the beginning of the block) and a segment size
+ returns the stream as a list of tokens
+ '''
+ stream_tokens = [s]
+ sortedfiles = list(stream.keys())
+ sortedfiles.sort()
+
+ blocks = {}
+ streamoffset = 0L
+ # Go through each file and add each referenced block exactly once.
+ for f in sortedfiles:
+ for b in stream[f]:
+ if b.locator not in blocks:
+ stream_tokens.append(b.locator)
+ blocks[b.locator] = streamoffset
+ streamoffset += locator_block_size(b.locator)
+
+ # Add the empty block if the stream is otherwise empty.
+ if len(stream_tokens) == 1:
+ stream_tokens.append(config.EMPTY_BLOCK_LOCATOR)
+
+ for f in sortedfiles:
+ # Add in file segments
+ current_span = None
+ fout = f.replace(' ', '\\040')
+ for segment in stream[f]:
+ # Collapse adjacent segments
+ streamoffset = blocks[segment.locator] + segment.segment_offset
+ if current_span is None:
+ current_span = [streamoffset, streamoffset + segment.segment_size]
+ else:
+ if streamoffset == current_span[1]:
+ current_span[1] += segment.segment_size
+ else:
+ stream_tokens.append("{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
+ current_span = [streamoffset, streamoffset + segment.segment_size]
- def seek(self, pos):
- self._filepos = min(max(pos, 0L), self.size())
+ if current_span is not None:
+ stream_tokens.append("{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
- def tell(self, pos):
- return self._filepos
+ if not stream[f]:
+ stream_tokens.append("0:0:{0}".format(fout))
- def size(self):
- n = self.segments[-1]
- return n[OFFSET] + n[BLOCKSIZE]
-
- def read(self, size):
- """Read up to 'size' bytes from the stream, starting at the current file position"""
- if size == 0:
- return ''
-
- data = ''
- for locator, blocksize, segmentoffset, segmentsize in locators_and_ranges(self.segments, self._filepos, size):
- self._stream.seek(locator+segmentoffset)
- data += self._stream.read(segmentsize)
- self._filepos += len(data)
- return data
-
- def readall(self, size=2**20):
- while True:
- data = self.read(size)
- if data == '':
- break
- yield data
-
- def bunzip2(self, size):
- decompressor = bz2.BZ2Decompressor()
- for segment in self.readall(size):
- data = decompressor.decompress(segment)
- if data and data != '':
- yield data
-
- def gunzip(self, size):
- decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
- for segment in self.readall(size):
- data = decompressor.decompress(decompressor.unconsumed_tail + segment)
- if data and data != '':
- yield data
-
- def readall_decompressed(self, size=2**20):
- self.seek(0)
- if re.search('\.bz2$', self._name):
- return self.bunzip2(size)
- elif re.search('\.gz$', self._name):
- return self.gunzip(size)
- else:
- return self.readall(size)
-
- def readlines(self, decompress=True):
- if decompress:
- datasource = self.readall_decompressed()
- else:
- self._stream.seek(self._pos + self._filepos)
- datasource = self.readall()
- data = ''
- for newdata in datasource:
- data += newdata
- sol = 0
- while True:
- eol = string.find(data, "\n", sol)
- if eol < 0:
- break
- yield data[sol:eol+1]
- sol = eol+1
- data = data[sol:]
- if data != '':
- yield data
+ return stream_tokens
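+# Rough example of the expected behaviour (the segment value here is a
+# hypothetical object exposing .locator, .segment_offset and .segment_size):
+# for stream name "." and
+# stream = {"foo.txt": [segment(locator="acbd18db4cc2f85cedef654fccc4a4d8+3", segment_offset=0, segment_size=3)]},
+# normalize_stream returns [".", "acbd18db4cc2f85cedef654fccc4a4d8+3", "0:3:foo.txt"]:
+# the stream name, each referenced block exactly once, then one
+# "position:length:filename" token per collapsed file segment.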
class StreamReader(object):
- def __init__(self, tokens):
- self._tokens = tokens
- self._pos = 0L
-
+ def __init__(self, tokens, keep=None, debug=False, _empty=False,
+ num_retries=0):
self._stream_name = None
- self.data_locators = []
- self.files = {}
+ self._data_locators = []
+ self._files = collections.OrderedDict()
+ self._keep = keep
+ self.num_retries = num_retries
streamoffset = 0L
- for tok in self._tokens:
- if self._stream_name == None:
+ # parse stream
+ for tok in tokens:
+ if debug: print 'tok', tok
+ if self._stream_name is None:
self._stream_name = tok.replace('\\040', ' ')
continue
s = re.match(r'^[0-9a-f]{32}\+(\d+)(\+\S+)*$', tok)
if s:
blocksize = long(s.group(1))
- self.data_locators.append([tok, blocksize, streamoffset])
+ self._data_locators.append(Range(tok, streamoffset, blocksize))
streamoffset += blocksize
continue
s = re.search(r'^(\d+):(\d+):(\S+)', tok)
if s:
pos = long(s.group(1))
size = long(s.group(2))
name = s.group(3).replace('\\040', ' ')
- if name not in self.files:
- self.files[name] = StreamFileReader(self, [[pos, size, 0]], name)
+ if name not in self._files:
+ self._files[name] = StreamFileReader(self, [Range(pos, 0, size)], name)
else:
- n = self.files[name]
- n.segments.append([pos, size, n.size()])
+ filereader = self._files[name]
+ filereader.segments.append(Range(pos, filereader.size(), size))
continue
raise errors.SyntaxError("Invalid manifest format")
-
- def tokens(self):
- return self._tokens
def name(self):
return self._stream_name
- def all_files(self):
- return self.files.values()
+ def files(self):
+ return self._files
- def seek(self, pos):
- """Set the position of the next read operation."""
- self._pos = pos
+ def all_files(self):
+ return self._files.values()
- def tell(self):
- return self._pos
+ def _size(self):
+ n = self._data_locators[-1]
+ return n.range_start + n.range_size
def size(self):
- n = self.data_locators[-1]
- return n[self.OFFSET] + n[self.BLOCKSIZE]
+ return self._size()
def locators_and_ranges(self, range_start, range_size):
- return locators_and_ranges(self.data_locators, range_start, range_size)
+ return locators_and_ranges(self._data_locators, range_start, range_size)
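+ # (locators_and_ranges is imported from .ranges; it returns a list of
+ # objects exposing .locator, .segment_offset and .segment_size that
+ # together cover the requested byte range, as used by _readfrom below.)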
+
+ @retry_method
+ def _keepget(self, locator, num_retries=None):
+ return self._keep.get(locator, num_retries=num_retries)
- def read(self, size):
- """Read up to 'size' bytes from the stream, starting at the current file position"""
+ @retry_method
+ def readfrom(self, start, size, num_retries=None):
+ return self._readfrom(start, size, num_retries=num_retries)
+
+ @retry_method
+ def _readfrom(self, start, size, num_retries=None):
+ """Read up to 'size' bytes from the stream, starting at 'start'"""
if size == 0:
return ''
- data = ''
- for locator, blocksize, segmentoffset, segmentsize in locators_and_ranges(self.data_locators, self._pos, size):
- data += Keep.get(locator)[segmentoffset:segmentoffset+segmentsize]
- self._pos += len(data)
- return data
+ if self._keep is None:
+ self._keep = KeepClient(num_retries=self.num_retries)
+ data = []
+ for lr in locators_and_ranges(self._data_locators, start, size):
+ data.append(self._keepget(lr.locator, num_retries=num_retries)[lr.segment_offset:lr.segment_offset+lr.segment_size])
+ return ''.join(data)
+
+ def manifest_text(self, strip=False):
+ manifest_text = [self.name().replace(' ', '\\040')]
+ if strip:
+ for d in self._data_locators:
+ m = re.match(r'^[0-9a-f]{32}\+\d+', d.locator)
+ manifest_text.append(m.group(0))
+ else:
+ manifest_text.extend([d.locator for d in self._data_locators])
+ manifest_text.extend([' '.join(["{}:{}:{}".format(seg.locator, seg.range_size, f.name.replace(' ', '\\040'))
+ for seg in f.segments])
+ for f in self._files.values()])
+ return ' '.join(manifest_text) + '\n'
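+# Sketch of typical use (my_keep is an assumed KeepClient instance):
+#   tokens = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt".split()
+#   reader = StreamReader(tokens, keep=my_keep)
+#   reader.size()          # -> 3L, total bytes covered by the data locators
+#   reader.readfrom(0, 3)  # -> first three bytes, fetched from Keep
+#   reader.manifest_text() # -> ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n"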
+
+
+
+
+# class StreamWriter(StreamReader):
+# def __init__(self, tokens, keep=None, debug=False, _empty=False,
+# num_retries=0):
+# super(StreamWriter, self).__init__(tokens, keep, debug, _empty, num_retries)
+
+# if len(self._files) != 1:
+# raise AssertionError("StreamWriter can only have one file at a time")
+# sr = self._files.popitem()[1]
+# self._files[sr.name] = StreamFileWriter(self, sr.segments, sr.name)
+
+# self.mutex = threading.Lock()
+# self.current_bblock = None
+# self.bufferblocks = {}
+
+# # wrap superclass methods in mutex
+# def _proxy_method(name):
+# method = getattr(StreamReader, name)
+# @functools.wraps(method, ('__name__', '__doc__'))
+# def wrapper(self, *args, **kwargs):
+# with self.mutex:
+# return method(self, *args, **kwargs)
+# return wrapper
+
+# for _method_name in ['files', 'all_files', 'size', 'locators_and_ranges', 'readfrom', 'manifest_text']:
+# locals()[_method_name] = _proxy_method(_method_name)
+
+# @retry_method
+# def _keepget(self, locator, num_retries=None):
+# if locator in self.bufferblocks:
+# bb = self.bufferblocks[locator]
+# return str(bb.buffer_block[0:bb.write_pointer])
+# else:
+# return self._keep.get(locator, num_retries=num_retries)
+
+# def _init_bufferblock(self):
+# last = self._data_locators[-1]
+# streamoffset = last.range_start + last.range_size
+# if last.range_size == 0:
+# del self._data_locators[-1]
+# self.current_bblock = BufferBlock("bufferblock%i" % len(self.bufferblocks), streamoffset)
+# self.bufferblocks[self.current_bblock.locator] = self.current_bblock
+# self._data_locators.append(self.current_bblock.locator_list_entry)
+
+# def _repack_writes(self):
+# '''Test if the buffer block has more data than is referenced by actual segments
+# (this happens when a buffered write over-writes a file range written in
+# a previous buffered write). Re-pack the buffer block for efficiency
+# and to avoid leaking information.
+# '''
+# segs = self._files.values()[0].segments
+
+# bufferblock_segs = []
+# i = 0
+# tmp_segs = copy.copy(segs)
+# while i < len(tmp_segs):
+# # Go through each segment and identify segments that include the buffer block
+# s = tmp_segs[i]
+# if s[LOCATOR] < self.current_bblock.locator_list_entry.range_start and (s[LOCATOR] + s.range_size) > self.current_bblock.locator_list_entry.range_start:
+# # The segment straddles the previous block and the current buffer block. Split the segment.
+# b1 = self.current_bblock.locator_list_entry.range_start - s[LOCATOR]
+# b2 = (s[LOCATOR] + s.range_size) - self.current_bblock.locator_list_entry.range_start
+# bb_seg = [self.current_bblock.locator_list_entry.range_start, b2, s.range_start+b1]
+# tmp_segs[i] = [s[LOCATOR], b1, s.range_start]
+# tmp_segs.insert(i+1, bb_seg)
+# bufferblock_segs.append(bb_seg)
+# i += 1
+# elif s[LOCATOR] >= self.current_bblock.locator_list_entry.range_start:
+# # The segment's data is in the buffer block.
+# bufferblock_segs.append(s)
+# i += 1
+
+# # Sum the segment sizes to get the total number of bytes in the file
+# # that reference the buffer block.
+# write_total = sum([s.range_size for s in bufferblock_segs])
+
+# if write_total < self.current_bblock.locator_list_entry.range_size:
+# # There is more data in the buffer block than is actually accounted for by segments, so
+# # re-pack into a new buffer by copying over to a new buffer block.
+# new_bb = BufferBlock(self.current_bblock.locator,
+# self.current_bblock.locator_list_entry.range_start,
+# starting_size=write_total)
+# for t in bufferblock_segs:
+# t_start = t[LOCATOR] - self.current_bblock.locator_list_entry.range_start
+# t_end = t_start + t.range_size
+# t[0] = self.current_bblock.locator_list_entry.range_start + new_bb.write_pointer
+# new_bb.append(self.current_bblock.buffer_block[t_start:t_end])
+
+# self.current_bblock = new_bb
+# self.bufferblocks[self.current_bblock.locator] = self.current_bblock
+# self._data_locators[-1] = self.current_bblock.locator_list_entry
+# self._files.values()[0].segments = tmp_segs
+
+# def _commit(self):
+# # commit buffer block
+
+# # TODO: do 'put' in the background?
+# pdh = self._keep.put(self.current_bblock.buffer_block[0:self.current_bblock.write_pointer])
+# self._data_locators[-1][0] = pdh
+# self.current_bblock = None
+
+# def commit(self):
+# with self.mutex:
+# self._repack_writes()
+# self._commit()
+
+# def _append(self, data):
+# if len(data) > config.KEEP_BLOCK_SIZE:
+# raise ArgumentError("Please append data chunks smaller than config.KEEP_BLOCK_SIZE")
+
+# if self.current_bblock is None:
+# self._init_bufferblock()
+
+# if (self.current_bblock.write_pointer + len(data)) > config.KEEP_BLOCK_SIZE:
+# self._repack_writes()
+# if (self.current_bblock.write_pointer + len(data)) > config.KEEP_BLOCK_SIZE:
+# self._commit()
+# self._init_bufferblock()
+
+# self.current_bblock.append(data)
+
+# def append(self, data):
+# with self.mutex:
+# self._append(data)