Merge branch 'master' into 4823-python-sdk-writable-collection-api
author Peter Amstutz <peter.amstutz@curoverse.com>
Tue, 17 Feb 2015 17:16:14 +0000 (12:16 -0500)
committer Peter Amstutz <peter.amstutz@curoverse.com>
Tue, 17 Feb 2015 17:16:14 +0000 (12:16 -0500)
18 files changed:
crunch_scripts/split-fastq.py
sdk/python/arvados/__init__.py
sdk/python/arvados/api.py
sdk/python/arvados/arvfile.py
sdk/python/arvados/collection.py
sdk/python/arvados/commands/put.py
sdk/python/arvados/config.py
sdk/python/arvados/keep.py
sdk/python/arvados/ranges.py [new file with mode: 0644]
sdk/python/arvados/safeapi.py [new file with mode: 0644]
sdk/python/arvados/stream.py
sdk/python/tests/arvados_testutil.py
sdk/python/tests/test_arvfile.py [new file with mode: 0644]
sdk/python/tests/test_collections.py
sdk/python/tests/test_keep_client.py
sdk/python/tests/test_stream.py
services/fuse/arvados_fuse/__init__.py
services/fuse/bin/arv-mount

index 17aabf2930393a48d3539483d5198cab5af35631..1c7a36871d4e2e469d0489ef3fd402e0d54474cf 100755 (executable)
@@ -16,8 +16,6 @@ inp = arvados.CollectionReader(arvados.getjobparam('reads'))
 
 manifest_list = []
 
-chunking = False #arvados.getjobparam('chunking')
-
 def nextline(reader, start):
     n = -1
     while True:
@@ -31,63 +29,6 @@ def nextline(reader, start):
             start += 128
     return n
 
-# Chunk a fastq into approximately 64 MiB chunks.  Requires that the input data
-# be decompressed ahead of time, such as using decompress-all.py.  Generates a
-# new manifest, but doesn't actually move any data around.  Handles paired
-# reads by ensuring that each chunk of a pair gets the same number of records.
-#
-# This works, but in practice is so slow that potential gains in alignment
-# performance are lost in the prep time, which is why it is currently disabled.
-#
-# A better algorithm would seek to a file position a bit less than the desired
-# chunk size and then scan ahead for the next record, making sure that record
-# was matched by the read pair.
-def splitfastq(p):
-    for i in xrange(0, len(p)):
-        p[i]["start"] = 0
-        p[i]["end"] = 0
-
-    count = 0
-    recordsize = [0, 0]
-
-    global piece
-    finish = False
-    while not finish:
-        for i in xrange(0, len(p)):
-            recordsize[i] = 0
-
-        # read next 4 lines
-        for i in xrange(0, len(p)):
-            for ln in xrange(0, 4):
-                r = nextline(p[i]["reader"], p[i]["end"]+recordsize[i])
-                if r == -1:
-                    finish = True
-                    break
-                recordsize[i] += (r+1)
-
-        splitnow = finish
-        for i in xrange(0, len(p)):
-            if ((p[i]["end"] - p[i]["start"]) + recordsize[i]) >= (64*1024*1024):
-                splitnow = True
-
-        if splitnow:
-            for i in xrange(0, len(p)):
-                global manifest_list
-                print >>sys.stderr, "Finish piece ./_%s/%s (%s %s)" % (piece, p[i]["reader"].name(), p[i]["start"], p[i]["end"])
-                manifest = []
-                manifest.extend(["./_" + str(piece)])
-                manifest.extend([d[arvados.LOCATOR] for d in p[i]["reader"]._stream._data_locators])
-                manifest.extend(["{}:{}:{}".format(seg[arvados.LOCATOR]+seg[arvados.OFFSET], seg[arvados.SEGMENTSIZE], p[i]["reader"].name().replace(' ', '\\040')) for seg in arvados.locators_and_ranges(p[i]["reader"].segments, p[i]["start"], p[i]["end"] - p[i]["start"])])
-                manifest_list.append(manifest)
-                p[i]["start"] = p[i]["end"]
-            piece += 1
-        else:
-            for i in xrange(0, len(p)):
-                p[i]["end"] += recordsize[i]
-            count += 1
-            if count % 10000 == 0:
-                print >>sys.stderr, "Record %s at %s" % (count, p[i]["end"])
-
 prog = re.compile(r'(.*?)(_[12])?\.fastq(\.gz)?$')
 
 # Look for fastq files
@@ -115,14 +56,11 @@ for s in inp.all_streams():
                 p[0]["reader"] = s.files()[name_pieces.group(0)]
 
             if p is not None:
-                if chunking:
-                    splitfastq(p)
-                else:
-                    for i in xrange(0, len(p)):
-                        m = p[i]["reader"].as_manifest().split()
-                        m[0] = "./_" + str(piece)
-                        manifest_list.append(m)
-                    piece += 1
+                for i in xrange(0, len(p)):
+                    m = p[i]["reader"].as_manifest().split()
+                    m[0] = "./_" + str(piece)
+                    manifest_list.append(m)
+                piece += 1
 
 manifest_text = "\n".join(" ".join(m) for m in manifest_list) + "\n"
 
index 4cae20d597d3e230c1fbd88b5c15f881d776c03a..e4d148e1ca1beedb95fa71bbe5b7a8eb1fb755d6 100644 (file)
@@ -19,9 +19,10 @@ import time
 import threading
 
 from api import *
-from collection import *
+from collection import CollectionReader, CollectionWriter, ResumableCollectionWriter
 from keep import *
 from stream import *
+from arvfile import StreamFileReader
 import errors
 import util
 
@@ -131,5 +132,3 @@ class job_setup:
                                        body={'success':True}
                                        ).execute()
             exit(0)
-
-
index 2f1f74044d78a7fee4ce6337f1d649b69a587df6..a44d330035f98c3117bbdac509b272283de9d52f 100644 (file)
@@ -73,7 +73,7 @@ def http_cache(data_type):
         path = None
     return path
 
-def api(version=None, cache=True, host=None, token=None, insecure=False, **kwargs):
+def api(version=None, cache=True, host=None, token=None, insecure=False, apiconfig=None, **kwargs):
     """Return an apiclient Resources object for an Arvados instance.
 
     Arguments:
@@ -84,6 +84,7 @@ def api(version=None, cache=True, host=None, token=None, insecure=False, **kwarg
     * host: The Arvados API server host (and optional :port) to connect to.
     * token: The authentication token to send with each API call.
     * insecure: If True, ignore SSL certificate validation errors.
+    * apiconfig: If provided, this should be a dict with entries for ARVADOS_API_HOST, ARVADOS_API_TOKEN, and optionally ARVADOS_API_HOST_INSECURE
 
     Additional keyword arguments will be passed directly to
     `apiclient_discovery.build` if a new Resource object is created.
@@ -110,12 +111,14 @@ def api(version=None, cache=True, host=None, token=None, insecure=False, **kwarg
         pass
     elif not host and not token:
         # Load from user configuration or environment
+        if apiconfig is None:
+            apiconfig = config.settings()
         for x in ['ARVADOS_API_HOST', 'ARVADOS_API_TOKEN']:
-            if x not in config.settings():
+            if x not in apiconfig:
                 raise ValueError("%s is not set. Aborting." % x)
-        host = config.get('ARVADOS_API_HOST')
-        token = config.get('ARVADOS_API_TOKEN')
-        insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE')
+        host = apiconfig.get('ARVADOS_API_HOST')
+        token = apiconfig.get('ARVADOS_API_TOKEN')
+        insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE', apiconfig)
     else:
         # Caller provided one but not the other
         if not host:
index e8dac463e5670818e8bf2495a31c9b05640a1e0d..dd48b99d39f0a1287d686761d5b1c75180ed2c9d 100644 (file)
@@ -1,6 +1,33 @@
 import functools
+import os
+import zlib
+import bz2
+from .ranges import *
+from arvados.retry import retry_method
+import config
+import hashlib
+import re   # used by ArvadosFileReaderBase.decompressed_name()
+import threading
+import Queue
+import copy
+import errno
+from .errors import KeepWriteError, AssertionError, ArgumentError
+from .keep import KeepLocator
 
-class ArvadosFileBase(object):
+def split(path):
+    """Separate the stream name and file name in a /-separated stream path and
+    return a tuple (stream_name, file_name).
+
+    If no stream name is available, assume '.'.
+
+    """
+    try:
+        stream_name, file_name = path.rsplit('/', 1)
+    except ValueError:  # No / in string
+        stream_name, file_name = '.', path
+    return stream_name, file_name
+
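The new split() helper behaves like this (illustrative doctest, not part of the commit):

    >>> split('path/to/file.txt')
    ('path/to', 'file.txt')
    >>> split('file.txt')    # no stream component; defaults to '.'
    ('.', 'file.txt')
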
+class FileLikeObjectBase(object):
     def __init__(self, name, mode):
         self.name = name
         self.mode = mode
@@ -9,11 +36,11 @@ class ArvadosFileBase(object):
     @staticmethod
     def _before_close(orig_func):
         @functools.wraps(orig_func)
-        def wrapper(self, *args, **kwargs):
+        def before_close_wrapper(self, *args, **kwargs):
             if self.closed:
                 raise ValueError("I/O operation on closed stream file")
             return orig_func(self, *args, **kwargs)
-        return wrapper
+        return before_close_wrapper
 
     def __enter__(self):
         return self
@@ -27,3 +54,836 @@ class ArvadosFileBase(object):
 
     def close(self):
         self.closed = True
+
+
+class ArvadosFileReaderBase(FileLikeObjectBase):
+    class _NameAttribute(str):
+        # The Python file API provides a plain .name attribute.
+        # Older SDK provided a name() method.
+        # This class provides both, for maximum compatibility.
+        def __call__(self):
+            return self
+
+    def __init__(self, name, mode, num_retries=None):
+        super(ArvadosFileReaderBase, self).__init__(self._NameAttribute(name), mode)
+        self._filepos = 0L
+        self.num_retries = num_retries
+        self._readline_cache = (None, None)
+
+    def __iter__(self):
+        while True:
+            data = self.readline()
+            if not data:
+                break
+            yield data
+
+    def decompressed_name(self):
+        return re.sub(r'\.(bz2|gz)$', '', self.name)
+
+    @FileLikeObjectBase._before_close
+    def seek(self, pos, whence=os.SEEK_CUR):
+        if whence == os.SEEK_CUR:
+            pos += self._filepos
+        elif whence == os.SEEK_END:
+            pos += self.size()
+        self._filepos = min(max(pos, 0L), self.size())
+
+    def tell(self):
+        return self._filepos
+
+    @FileLikeObjectBase._before_close
+    @retry_method
+    def readall(self, size=2**20, num_retries=None):
+        while True:
+            data = self.read(size, num_retries=num_retries)
+            if data == '':
+                break
+            yield data
+
+    @FileLikeObjectBase._before_close
+    @retry_method
+    def readline(self, size=float('inf'), num_retries=None):
+        cache_pos, cache_data = self._readline_cache
+        if self.tell() == cache_pos:
+            data = [cache_data]
+        else:
+            data = ['']
+        data_size = len(data[-1])
+        while (data_size < size) and ('\n' not in data[-1]):
+            next_read = self.read(2 ** 20, num_retries=num_retries)
+            if not next_read:
+                break
+            data.append(next_read)
+            data_size += len(next_read)
+        data = ''.join(data)
+        try:
+            nextline_index = data.index('\n') + 1
+        except ValueError:
+            nextline_index = len(data)
+        nextline_index = min(nextline_index, size)
+        self._readline_cache = (self.tell(), data[nextline_index:])
+        return data[:nextline_index]
+
+    @FileLikeObjectBase._before_close
+    @retry_method
+    def decompress(self, decompress, size, num_retries=None):
+        for segment in self.readall(size, num_retries):
+            data = decompress(segment)
+            if data:
+                yield data
+
+    @FileLikeObjectBase._before_close
+    @retry_method
+    def readall_decompressed(self, size=2**20, num_retries=None):
+        self.seek(0)
+        if self.name.endswith('.bz2'):
+            dc = bz2.BZ2Decompressor()
+            return self.decompress(dc.decompress, size,
+                                   num_retries=num_retries)
+        elif self.name.endswith('.gz'):
+            dc = zlib.decompressobj(16+zlib.MAX_WBITS)
+            return self.decompress(lambda segment: dc.decompress(dc.unconsumed_tail + segment),
+                                   size, num_retries=num_retries)
+        else:
+            return self.readall(size, num_retries=num_retries)
+
+    @FileLikeObjectBase._before_close
+    @retry_method
+    def readlines(self, sizehint=float('inf'), num_retries=None):
+        data = []
+        data_size = 0
+        for s in self.readall(num_retries=num_retries):
+            data.append(s)
+            data_size += len(s)
+            if data_size >= sizehint:
+                break
+        return ''.join(data).splitlines(True)
+
+    def size(self):
+        raise NotImplementedError()
+
+    def read(self, size, num_retries=None):
+        raise NotImplementedError()
+
+    def readfrom(self, start, size, num_retries=None):
+        raise NotImplementedError()
+
+
+class StreamFileReader(ArvadosFileReaderBase):
+    def __init__(self, stream, segments, name):
+        super(StreamFileReader, self).__init__(name, 'rb', num_retries=stream.num_retries)
+        self._stream = stream
+        self.segments = segments
+
+    def stream_name(self):
+        return self._stream.name()
+
+    def size(self):
+        n = self.segments[-1]
+        return n.range_start + n.range_size
+
+    @FileLikeObjectBase._before_close
+    @retry_method
+    def read(self, size, num_retries=None):
+        """Read up to 'size' bytes from the stream, starting at the current file position"""
+        if size == 0:
+            return ''
+
+        data = ''
+        available_chunks = locators_and_ranges(self.segments, self._filepos, size)
+        if available_chunks:
+            lr = available_chunks[0]
+            data = self._stream._readfrom(lr.locator+lr.segment_offset,
+                                          lr.segment_size,
+                                          num_retries=num_retries)
+
+        self._filepos += len(data)
+        return data
+
+    @FileLikeObjectBase._before_close
+    @retry_method
+    def readfrom(self, start, size, num_retries=None):
+        """Read up to 'size' bytes from the stream, starting at 'start'"""
+        if size == 0:
+            return ''
+
+        data = []
+        for lr in locators_and_ranges(self.segments, start, size):
+            data.append(self._stream._readfrom(lr.locator+lr.segment_offset, lr.segment_size,
+                                              num_retries=num_retries))
+        return ''.join(data)
+
+    def as_manifest(self):
+        from stream import normalize_stream
+        segs = []
+        for r in self.segments:
+            segs.extend(self._stream.locators_and_ranges(r.locator, r.range_size))
+        return " ".join(normalize_stream(".", {self.name: segs})) + "\n"
+
+
+def synchronized(orig_func):
+    @functools.wraps(orig_func)
+    def synchronized_wrapper(self, *args, **kwargs):
+        with self.lock:
+            return orig_func(self, *args, **kwargs)
+    return synchronized_wrapper
+
+class BufferBlock(object):
+    """A BufferBlock is a stand-in for a Keep block that is in the process of being
+    written.
+
+    Writers can append to it, get the size, and compute the Keep locator.
+    There are three valid states:
+
+    WRITABLE
+      Can append to block.
+
+    PENDING
+      Block is in the process of being uploaded to Keep, append is an error.
+
+    COMMITTED
+      The block has been written to Keep, its internal buffer has been
+      released, fetching the block will fetch it via keep client (since we
+      discarded the internal copy), and identifiers referring to the BufferBlock
+      can be replaced with the block locator.
+
+    """
+
+    WRITABLE = 0
+    PENDING = 1
+    COMMITTED = 2
+
+    def __init__(self, blockid, starting_capacity, owner):
+        """
+        :blockid:
+          the identifier for this block
+
+        :starting_capacity:
+          the initial buffer capacity
+
+        :owner:
+          ArvadosFile that owns this block
+
+        """
+        self.blockid = blockid
+        self.buffer_block = bytearray(starting_capacity)
+        self.buffer_view = memoryview(self.buffer_block)
+        self.write_pointer = 0
+        self._state = BufferBlock.WRITABLE
+        self._locator = None
+        self.owner = owner
+        self.lock = threading.Lock()
+
+    @synchronized
+    def append(self, data):
+        """Append some data to the buffer.
+
+        Only valid if the block is in WRITABLE state.  Implements an expanding
+        buffer, doubling capacity as needed to accommodate all the data.
+
+        """
+        if self._state == BufferBlock.WRITABLE:
+            while (self.write_pointer+len(data)) > len(self.buffer_block):
+                new_buffer_block = bytearray(len(self.buffer_block) * 2)
+                new_buffer_block[0:self.write_pointer] = self.buffer_block[0:self.write_pointer]
+                self.buffer_block = new_buffer_block
+                self.buffer_view = memoryview(self.buffer_block)
+            self.buffer_view[self.write_pointer:self.write_pointer+len(data)] = data
+            self.write_pointer += len(data)
+            self._locator = None
+        else:
+            raise AssertionError("Buffer block is not writable")
+
+    def set_state(self, nextstate):
+        if ((self._state == BufferBlock.WRITABLE and nextstate == BufferBlock.PENDING) or
+            (self._state == BufferBlock.PENDING and nextstate == BufferBlock.COMMITTED)):
+            self._state = nextstate
+        else:
+            raise AssertionError("Invalid state change from %s to %s" % (self.state, state))
+
+    @synchronized
+    def state(self):
+        return self._state
+
+    def size(self):
+        """The amount of data written to the buffer."""
+        return self.write_pointer
+
+    @synchronized
+    def locator(self):
+        """The Keep locator for this buffer's contents."""
+        if self._locator is None:
+            self._locator = "%s+%i" % (hashlib.md5(self.buffer_view[0:self.write_pointer]).hexdigest(), self.size())
+        return self._locator
+
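A rough sketch of the state machine described in the BufferBlock docstring (standalone use is hypothetical; blocks are normally managed by BlockManager below):

    bb = BufferBlock('bufferblock0', starting_capacity=2**14, owner=None)
    bb.append('hello')                    # WRITABLE: buffer expands as needed
    bb.set_state(BufferBlock.PENDING)     # upload started; append() now raises
    bb.set_state(BufferBlock.COMMITTED)   # upload done; locator is permanent
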
+
+class NoopLock(object):
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        pass
+
+    def acquire(self, blocking=False):
+        pass
+
+    def release(self):
+        pass
+
+SYNC_READONLY = 1
+SYNC_EXPLICIT = 2
+SYNC_LIVE = 3
+
+def must_be_writable(orig_func):
+    @functools.wraps(orig_func)
+    def must_be_writable_wrapper(self, *args, **kwargs):
+        if self.sync_mode() == SYNC_READONLY:
+            raise IOError(errno.EROFS, "Collection is read only")
+        return orig_func(self, *args, **kwargs)
+    return must_be_writable_wrapper
+
+
+class BlockManager(object):
+    """BlockManager handles buffer blocks, background block uploads, and background
+    block prefetch for a Collection of ArvadosFiles.
+
+    """
+    def __init__(self, keep):
+        """keep: KeepClient object to use"""
+        self._keep = keep
+        self._bufferblocks = {}
+        self._put_queue = None
+        self._put_errors = None
+        self._put_threads = None
+        self._prefetch_queue = None
+        self._prefetch_threads = None
+        self.lock = threading.Lock()
+        self.prefetch_enabled = True
+        self.num_put_threads = 2
+        self.num_get_threads = 2
+
+    @synchronized
+    def alloc_bufferblock(self, blockid=None, starting_capacity=2**14, owner=None):
+        """Allocate a new, empty bufferblock in WRITABLE state and return it.
+
+        :blockid:
+          optional block identifier, otherwise one will be automatically assigned
+
+        :starting_capacity:
+          optional capacity, otherwise will use default capacity
+
+        :owner:
+          ArvadosFile that owns this block
+
+        """
+        if blockid is None:
+            blockid = "bufferblock%i" % len(self._bufferblocks)
+        bufferblock = BufferBlock(blockid, starting_capacity=starting_capacity, owner=owner)
+        self._bufferblocks[bufferblock.blockid] = bufferblock
+        return bufferblock
+
+    @synchronized
+    def dup_block(self, block, owner):
+        """Create a new bufferblock in WRITABLE state, initialized with the content of
+        an existing bufferblock.
+
+        :block:
+          the buffer block to copy.
+
+        :owner:
+          ArvadosFile that owns the new block
+
+        """
+        new_blockid = "bufferblock%i" % len(self._bufferblocks)
+        with block.lock:
+            if block._state == BufferBlock.COMMITTED:
+                raise AssertionError("Can only duplicate a writable or pending buffer block")
+
+            bufferblock = BufferBlock(new_blockid, block.size(), owner)
+            bufferblock.append(block.buffer_view[0:block.size()])
+        self._bufferblocks[bufferblock.blockid] = bufferblock
+        return bufferblock
+
+    @synchronized
+    def is_bufferblock(self, locator):
+        return locator in self._bufferblocks
+
+    @synchronized
+    def stop_threads(self):
+        """Shut down and wait for background upload and download threads to finish."""
+
+        if self._put_threads is not None:
+            for t in self._put_threads:
+                self._put_queue.put(None)
+            for t in self._put_threads:
+                t.join()
+        self._put_threads = None
+        self._put_queue = None
+        self._put_errors = None
+
+        if self._prefetch_threads is not None:
+            for t in self._prefetch_threads:
+                self._prefetch_queue.put(None)
+            for t in self._prefetch_threads:
+                t.join()
+        self._prefetch_threads = None
+        self._prefetch_queue = None
+
+    def commit_bufferblock(self, block):
+        """Initiate a background upload of a bufferblock.
+
+        This will block if the upload queue is at capacity, otherwise it will
+        return immediately.
+
+        """
+
+        def commit_bufferblock_worker(self):
+            """Background uploader thread."""
+
+            while True:
+                try:
+                    bufferblock = self._put_queue.get()
+                    if bufferblock is None:
+                        return
+                    loc = self._keep.put(bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes())
+                    with bufferblock.lock:
+                        bufferblock._locator = loc
+                        bufferblock.buffer_view = None
+                        bufferblock.buffer_block = None
+                        bufferblock.set_state(BufferBlock.COMMITTED)
+
+                except Exception as e:
+                    # Report the failure; commit_all() drains this queue and raises.
+                    self._put_errors.put((bufferblock.locator(), e))
+                finally:
+                    if self._put_queue is not None:
+                        self._put_queue.task_done()
+
+        with self.lock:
+            if self._put_threads is None:
+                # Start uploader threads.
+
+                # If we don't limit the Queue size, the upload queue can quickly
+                # grow to take up gigabytes of RAM if the writing process is
+                # generating data more quickly than it can be sent to the Keep
+                # servers.
+                #
+                # With two upload threads and a queue size of 2, this means up to 4
+                # blocks pending.  If they are full 64 MiB blocks, that means up to
+                # 256 MiB of internal buffering, which is the same size as the
+                # default download block cache in KeepClient.
+                self._put_queue = Queue.Queue(maxsize=2)
+                self._put_errors = Queue.Queue()
+
+                self._put_threads = []
+                for i in xrange(0, self.num_put_threads):
+                    thread = threading.Thread(target=commit_bufferblock_worker, args=(self,))
+                    self._put_threads.append(thread)
+                    thread.daemon = True
+                    thread.start()
+
+        # Mark the block as PENDING to disallow any more appends.
+        with block.lock:
+            block.set_state(BufferBlock.PENDING)
+        self._put_queue.put(block)
+
+    @synchronized
+    def get_bufferblock(self, locator):
+        return self._bufferblocks.get(locator)
+
+    def get_block_contents(self, locator, num_retries, cache_only=False):
+        """Fetch a block.
+
+        First checks whether the locator is a BufferBlock and returns its
+        contents if so; otherwise passes the request through to KeepClient.get().
+
+        """
+        with self.lock:
+            if locator in self._bufferblocks:
+                bufferblock = self._bufferblocks[locator]
+                if bufferblock.state() != BufferBlock.COMMITTED:
+                    return bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes()
+                else:
+                    locator = bufferblock._locator
+        if cache_only:
+            return self._keep.get_from_cache(locator)
+        else:
+            return self._keep.get(locator, num_retries=num_retries)
+
+    def commit_all(self):
+        """Commit all outstanding buffer blocks.
+
+        Unlike commit_bufferblock(), this is a synchronous call, and will not
+        return until all buffer blocks are uploaded.  Raises
+        KeepWriteError() if any blocks failed to upload.
+
+        """
+        with self.lock:
+            items = self._bufferblocks.items()
+
+        for k,v in items:
+            if v.state() == BufferBlock.WRITABLE:
+                self.commit_bufferblock(v)
+
+        with self.lock:
+            if self._put_queue is not None:
+                self._put_queue.join()
+
+                if not self._put_errors.empty():
+                    err = []
+                    try:
+                        while True:
+                            err.append(self._put_errors.get(False))
+                    except Queue.Empty:
+                        pass
+                    raise KeepWriteError("Error writing some blocks", err)
+
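Taken together, the upload half of BlockManager works roughly like this (a sketch, not part of the commit; keep_client is an existing KeepClient):

    bm = BlockManager(keep_client)
    bb = bm.alloc_bufferblock()
    bb.append('some data')
    bm.commit_bufferblock(bb)   # asynchronous: queued for a background PUT
    bm.commit_all()             # synchronous: raises KeepWriteError on failure
    bm.stop_threads()
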
+    def block_prefetch(self, locator):
+        """Initiate a background download of a block.
+
+        This assumes that the underlying KeepClient implements a block cache,
+        so repeated requests for the same block will not result in repeated
+        downloads (unless the block is evicted from the cache).  This method
+        does not block.
+
+        """
+
+        if not self.prefetch_enabled:
+            return
+
+        def block_prefetch_worker(self):
+            """The background downloader thread."""
+            while True:
+                try:
+                    b = self._prefetch_queue.get()
+                    if b is None:
+                        return
+                    self._keep.get(b)
+                except:
+                    pass
+
+        with self.lock:
+            if locator in self._bufferblocks:
+                return
+            if self._prefetch_threads is None:
+                self._prefetch_queue = Queue.Queue()
+                self._prefetch_threads = []
+                for i in xrange(0, self.num_get_threads):
+                    thread = threading.Thread(target=block_prefetch_worker, args=(self,))
+                    self._prefetch_threads.append(thread)
+                    thread.daemon = True
+                    thread.start()
+        self._prefetch_queue.put(locator)
+
+
+class ArvadosFile(object):
+    """ArvadosFile manages the underlying representation of a file in Keep as a
+    sequence of segments spanning a set of blocks, and implements random
+    read/write access.
+
+    This object may be accessed from multiple threads.
+
+    """
+
+    def __init__(self, parent, stream=[], segments=[]):
+        """
+        ArvadosFile constructor.
+
+        :stream:
+          a list of Range objects representing a block stream
+
+        :segments:
+          a list of Range objects representing segments
+        """
+        self.parent = parent
+        self._modified = True
+        self._segments = []
+        self.lock = parent.root_collection().lock
+        for s in segments:
+            self._add_segment(stream, s.locator, s.range_size)
+        self._current_bblock = None
+
+    def sync_mode(self):
+        return self.parent.sync_mode()
+
+    @synchronized
+    def segments(self):
+        return copy.copy(self._segments)
+
+    @synchronized
+    def clone(self, new_parent):
+        """Make a copy of this file."""
+        cp = ArvadosFile(new_parent)
+        cp.replace_contents(self)
+        return cp
+
+    @must_be_writable
+    @synchronized
+    def replace_contents(self, other):
+        """Replace segments of this file with segments from another `ArvadosFile` object."""
+
+        map_loc = {}
+        self._segments = []
+        for r in other.segments():
+            new_loc = r.locator
+            if other.parent._my_block_manager().is_bufferblock(r.locator):
+                if r.locator not in map_loc:
+                    bufferblock = other.parent._my_block_manager().get_bufferblock(r.locator)
+                    if bufferblock.state() != BufferBlock.WRITABLE:
+                        map_loc[r.locator] = bufferblock.locator()
+                    else:
+                        map_loc[r.locator] = self.parent._my_block_manager().dup_block(bufferblock, self).blockid
+                new_loc = map_loc[r.locator]
+
+            self._segments.append(Range(new_loc, r.range_start, r.range_size, r.segment_offset))
+
+        self._modified = True
+
+    def __eq__(self, other):
+        if other is self:
+            return True
+        if not isinstance(other, ArvadosFile):
+            return False
+
+        othersegs = other.segments()
+        with self.lock:
+            if len(self._segments) != len(othersegs):
+                return False
+            for i in xrange(0, len(othersegs)):
+                seg1 = self._segments[i]
+                seg2 = othersegs[i]
+                loc1 = seg1.locator
+                loc2 = seg2.locator
+
+                if self.parent._my_block_manager().is_bufferblock(loc1):
+                    loc1 = self.parent._my_block_manager().get_bufferblock(loc1).locator()
+
+                if other.parent._my_block_manager().is_bufferblock(loc2):
+                    loc2 = other.parent._my_block_manager().get_bufferblock(loc2).locator()
+
+                if (KeepLocator(loc1).stripped() != KeepLocator(loc2).stripped() or
+                    seg1.range_start != seg2.range_start or
+                    seg1.range_size != seg2.range_size or
+                    seg1.segment_offset != seg2.segment_offset):
+                    return False
+
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    @synchronized
+    def set_unmodified(self):
+        """Clear the modified flag"""
+        self._modified = False
+
+    @synchronized
+    def modified(self):
+        """Test the modified flag"""
+        return self._modified
+
+    @must_be_writable
+    @synchronized
+    def truncate(self, size):
+        """Shrink the size of the file.
+
+        If `size` is less than the size of the file, the file contents after
+        `size` will be discarded.  If `size` is greater than the current size
+        of the file, an IOError will be raised.
+
+        """
+        if size < self.size():
+            new_segs = []
+            for r in self._segments:
+                range_end = r.range_start+r.range_size
+                if r.range_start >= size:
+                    # segment is past the truncate size, all done
+                    break
+                elif size < range_end:
+                    nr = Range(r.locator, r.range_start, size - r.range_start)
+                    nr.segment_offset = r.segment_offset
+                    new_segs.append(nr)
+                    break
+                else:
+                    new_segs.append(r)
+
+            self._segments = new_segs
+            self._modified = True
+        elif size > self.size():
+            raise IOError("truncate() does not support extending the file size")
+
+    def readfrom(self, offset, size, num_retries):
+        """Read upto `size` bytes from the file starting at `offset`."""
+
+        with self.lock:
+            if size == 0 or offset >= self.size():
+                return ''
+            prefetch = locators_and_ranges(self._segments, offset, size + config.KEEP_BLOCK_SIZE)
+            readsegs = locators_and_ranges(self._segments, offset, size)
+
+        for lr in prefetch:
+            self.parent._my_block_manager().block_prefetch(lr.locator)
+
+        data = []
+        for lr in readsegs:
+            block = self.parent._my_block_manager().get_block_contents(lr.locator, num_retries=num_retries, cache_only=bool(data))
+            if block:
+                data.append(block[lr.segment_offset:lr.segment_offset+lr.segment_size])
+            else:
+                break
+        return ''.join(data)
+
+    def _repack_writes(self):
+        """Test if the buffer block has more data than is referenced by actual
+        segments.
+
+        This happens when a buffered write over-writes a file range written in
+        a previous buffered write.  Re-pack the buffer block for efficiency
+        and to avoid leaking information.
+
+        """
+        segs = self._segments
+
+        # Sum up the segments to get the total bytes of the file referencing
+        # into the buffer block.
+        bufferblock_segs = [s for s in segs if s.locator == self._current_bblock.blockid]
+        write_total = sum([s.range_size for s in bufferblock_segs])
+
+        if write_total < self._current_bblock.size():
+            # There is more data in the buffer block than is actually accounted for by segments, so
+            # re-pack into a new buffer by copying over to a new buffer block.
+            new_bb = self.parent._my_block_manager().alloc_bufferblock(self._current_bblock.blockid, starting_capacity=write_total, owner=self)
+            for t in bufferblock_segs:
+                new_bb.append(self._current_bblock.buffer_view[t.segment_offset:t.segment_offset+t.range_size].tobytes())
+                t.segment_offset = new_bb.size() - t.range_size
+
+            self._current_bblock = new_bb
+
+    @must_be_writable
+    @synchronized
+    def writeto(self, offset, data, num_retries):
+        """Write `data` to the file starting at `offset`.
+
+        This will update existing bytes and/or extend the size of the file as
+        necessary.
+
+        """
+        if len(data) == 0:
+            return
+
+        if offset > self.size():
+            raise ArgumentError("Offset is past the end of the file")
+
+        if len(data) > config.KEEP_BLOCK_SIZE:
+            raise ArgumentError("Please append data in chunks smaller than %i bytes (config.KEEP_BLOCK_SIZE)" % (config.KEEP_BLOCK_SIZE))
+
+        self._modified = True
+
+        if self._current_bblock is None or self._current_bblock.state() != BufferBlock.WRITABLE:
+            self._current_bblock = self.parent._my_block_manager().alloc_bufferblock(owner=self)
+
+        if (self._current_bblock.size() + len(data)) > config.KEEP_BLOCK_SIZE:
+            self._repack_writes()
+            if (self._current_bblock.size() + len(data)) > config.KEEP_BLOCK_SIZE:
+                self.parent._my_block_manager().commit_bufferblock(self._current_bblock)
+                self._current_bblock = self.parent._my_block_manager().alloc_bufferblock(owner=self)
+
+        self._current_bblock.append(data)
+
+        replace_range(self._segments, offset, len(data), self._current_bblock.blockid, self._current_bblock.write_pointer - len(data))
+
+    @must_be_writable
+    @synchronized
+    def add_segment(self, blocks, pos, size):
+        """Add a segment to the end of the file, with `pos` and `offset` referencing a
+        section of the stream described by `blocks` (a list of Range objects)
+
+        """
+        self._add_segment(blocks, pos, size)
+
+    def _add_segment(self, blocks, pos, size):
+        """Internal implementation of add_segment."""
+        self._modified = True
+        for lr in locators_and_ranges(blocks, pos, size):
+            last = self._segments[-1] if self._segments else Range(0, 0, 0)
+            r = Range(lr.locator, last.range_start+last.range_size, lr.segment_size, lr.segment_offset)
+            self._segments.append(r)
+
+    @synchronized
+    def size(self):
+        """Get the file size."""
+        if self._segments:
+            n = self._segments[-1]
+            return n.range_start + n.range_size
+        else:
+            return 0
+
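Because writeto() rejects writes larger than config.KEEP_BLOCK_SIZE, a caller streaming a large payload would chunk it; a sketch (af and payload are hypothetical names):

    CHUNK = 2**20    # well under config.KEEP_BLOCK_SIZE (64 MiB)
    base = af.size()
    for start in xrange(0, len(payload), CHUNK):
        af.writeto(base + start, payload[start:start+CHUNK], num_retries=2)
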
+class ArvadosFileReader(ArvadosFileReaderBase):
+    """Wraps ArvadosFile in a file-like object supporting reading only.
+
+    Be aware that this class is NOT thread safe as there is no locking around
+    updating file pointer.
+
+    """
+
+    def __init__(self, arvadosfile, name, mode="r", num_retries=None):
+        super(ArvadosFileReader, self).__init__(name, mode, num_retries=num_retries)
+        self.arvadosfile = arvadosfile
+
+    def size(self):
+        return self.arvadosfile.size()
+
+    @FileLikeObjectBase._before_close
+    @retry_method
+    def read(self, size, num_retries=None):
+        """Read up to `size` bytes from the stream, starting at the current file position."""
+        data = self.arvadosfile.readfrom(self._filepos, size, num_retries)
+        self._filepos += len(data)
+        return data
+
+    @FileLikeObjectBase._before_close
+    @retry_method
+    def readfrom(self, offset, size, num_retries=None):
+        """Read up to `size` bytes from the stream, starting at the current file position."""
+        return self.arvadosfile.readfrom(offset, size, num_retries)
+
+    def flush(self):
+        pass
+
+
+class ArvadosFileWriter(ArvadosFileReader):
+    """Wraps ArvadosFile in a file-like object supporting both reading and writing.
+
+    Be aware that this class is NOT thread safe as there is no locking around
+    updating file pointer.
+
+    """
+
+    def __init__(self, arvadosfile, name, mode, num_retries=None):
+        super(ArvadosFileWriter, self).__init__(arvadosfile, name, mode, num_retries=num_retries)
+
+    @FileLikeObjectBase._before_close
+    @retry_method
+    def write(self, data, num_retries=None):
+        if self.mode[0] == "a":
+            self.arvadosfile.writeto(self.size(), data, num_retries)
+        else:
+            self.arvadosfile.writeto(self._filepos, data, num_retries)
+            self._filepos += len(data)
+
+    @FileLikeObjectBase._before_close
+    @retry_method
+    def writelines(self, seq, num_retries=None):
+        for s in seq:
+            self.write(s, num_retries)
+
+    def truncate(self, size=None):
+        if size is None:
+            size = self._filepos
+        self.arvadosfile.truncate(size)
+        if self._filepos > self.size():
+            self._filepos = self.size()
+
+    def close(self):
+        if self.arvadosfile.parent.sync_mode() == SYNC_LIVE:
+            self.arvadosfile.parent.root_collection().save()
+        super(ArvadosFileWriter, self).close()
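
End to end, the file classes above are reached through the Collection API added in collection.py below; a minimal hedged sketch:

    with c.open('subdir/data.txt', 'w') as f:    # c: a writable Collection
        f.write('hello\n')
    with c.open('subdir/data.txt', 'r') as f:
        print f.read(64)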
index 7bfdf782f8d06b03d6ac482fa64872d1eb8ff9be..f6dc4d52b98f6a376f68e8109645483ccd8d4795 100644 (file)
@@ -2,59 +2,25 @@ import functools
 import logging
 import os
 import re
+import errno
+import time
 
 from collections import deque
 from stat import *
 
-from .arvfile import ArvadosFileBase
+from .arvfile import split, FileLikeObjectBase, ArvadosFile, ArvadosFileWriter, ArvadosFileReader, BlockManager, synchronized, must_be_writable, SYNC_READONLY, SYNC_EXPLICIT, SYNC_LIVE, NoopLock
 from keep import *
-from .stream import StreamReader, split
+from .stream import StreamReader, normalize_stream, locator_block_size
+from .ranges import Range, LocatorAndRange
+from .safeapi import ThreadSafeApiCache
 import config
 import errors
 import util
+import events
+from arvados.retry import retry_method
 
 _logger = logging.getLogger('arvados.collection')
 
-def normalize_stream(s, stream):
-    stream_tokens = [s]
-    sortedfiles = list(stream.keys())
-    sortedfiles.sort()
-
-    blocks = {}
-    streamoffset = 0L
-    for f in sortedfiles:
-        for b in stream[f]:
-            if b[arvados.LOCATOR] not in blocks:
-                stream_tokens.append(b[arvados.LOCATOR])
-                blocks[b[arvados.LOCATOR]] = streamoffset
-                streamoffset += b[arvados.BLOCKSIZE]
-
-    if len(stream_tokens) == 1:
-        stream_tokens.append(config.EMPTY_BLOCK_LOCATOR)
-
-    for f in sortedfiles:
-        current_span = None
-        fout = f.replace(' ', '\\040')
-        for segment in stream[f]:
-            segmentoffset = blocks[segment[arvados.LOCATOR]] + segment[arvados.OFFSET]
-            if current_span is None:
-                current_span = [segmentoffset, segmentoffset + segment[arvados.SEGMENTSIZE]]
-            else:
-                if segmentoffset == current_span[1]:
-                    current_span[1] += segment[arvados.SEGMENTSIZE]
-                else:
-                    stream_tokens.append("{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
-                    current_span = [segmentoffset, segmentoffset + segment[arvados.SEGMENTSIZE]]
-
-        if current_span is not None:
-            stream_tokens.append("{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
-
-        if not stream[f]:
-            stream_tokens.append("0:0:{0}".format(fout))
-
-    return stream_tokens
-
-
 class CollectionBase(object):
     def __enter__(self):
         return self
@@ -193,11 +159,11 @@ class CollectionReader(CollectionBase):
     def _populate_first(orig_func):
         # Decorator for methods that read actual Collection data.
         @functools.wraps(orig_func)
-        def wrapper(self, *args, **kwargs):
+        def populate_first_wrapper(self, *args, **kwargs):
             if self._streams is None:
                 self._populate()
             return orig_func(self, *args, **kwargs)
-        return wrapper
+        return populate_first_wrapper
 
     @_populate_first
     def api_response(self):
@@ -221,7 +187,7 @@ class CollectionReader(CollectionBase):
                 if filename not in streams[streamname]:
                     streams[streamname][filename] = []
                 for r in f.segments:
-                    streams[streamname][filename].extend(s.locators_and_ranges(r[0], r[1]))
+                    streams[streamname][filename].extend(s.locators_and_ranges(r.locator, r.range_size))
 
         self._streams = [normalize_stream(s, streams[s])
                          for s in sorted(streams)]
@@ -278,7 +244,7 @@ class CollectionReader(CollectionBase):
             return self._manifest_text
 
 
-class _WriterFile(ArvadosFileBase):
+class _WriterFile(FileLikeObjectBase):
     def __init__(self, coll_writer, name):
         super(_WriterFile, self).__init__(name, 'wb')
         self.dest = coll_writer
@@ -287,23 +253,21 @@ class _WriterFile(ArvadosFileBase):
         super(_WriterFile, self).close()
         self.dest.finish_current_file()
 
-    @ArvadosFileBase._before_close
+    @FileLikeObjectBase._before_close
     def write(self, data):
         self.dest.write(data)
 
-    @ArvadosFileBase._before_close
+    @FileLikeObjectBase._before_close
     def writelines(self, seq):
         for data in seq:
             self.write(data)
 
-    @ArvadosFileBase._before_close
+    @FileLikeObjectBase._before_close
     def flush(self):
         self.dest.flush_data()
 
 
 class CollectionWriter(CollectionBase):
-    KEEP_BLOCK_SIZE = 2**26
-
     def __init__(self, api_client=None, num_retries=0, replication=None):
         """Instantiate a CollectionWriter.
 
@@ -373,7 +337,7 @@ class CollectionWriter(CollectionBase):
 
     def _work_file(self):
         while True:
-            buf = self._queued_file.read(self.KEEP_BLOCK_SIZE)
+            buf = self._queued_file.read(config.KEEP_BLOCK_SIZE)
             if not buf:
                 break
             self.write(buf)
@@ -445,7 +409,7 @@ class CollectionWriter(CollectionBase):
         self._data_buffer.append(newdata)
         self._data_buffer_len += len(newdata)
         self._current_stream_length += len(newdata)
-        while self._data_buffer_len >= self.KEEP_BLOCK_SIZE:
+        while self._data_buffer_len >= config.KEEP_BLOCK_SIZE:
             self.flush_data()
 
     def open(self, streampath, filename=None):
@@ -482,9 +446,9 @@ class CollectionWriter(CollectionBase):
         if data_buffer:
             self._current_stream_locators.append(
                 self._my_keep().put(
-                    data_buffer[0:self.KEEP_BLOCK_SIZE],
+                    data_buffer[0:config.KEEP_BLOCK_SIZE],
                     copies=self.replication))
-            self._data_buffer = [data_buffer[self.KEEP_BLOCK_SIZE:]]
+            self._data_buffer = [data_buffer[config.KEEP_BLOCK_SIZE:]]
             self._data_buffer_len = len(self._data_buffer[0])
 
     def start_new_file(self, newfilename=None):
@@ -691,3 +655,1082 @@ class ResumableCollectionWriter(CollectionWriter):
             raise errors.AssertionError(
                 "resumable writer can't accept unsourced data")
         return super(ResumableCollectionWriter, self).write(data)
+
+ADD = "add"
+DEL = "del"
+MOD = "mod"
+FILE = "file"
+COLLECTION = "collection"
+
+class SynchronizedCollectionBase(CollectionBase):
+    """Base class for Collections and Subcollections.
+
+    Implements the majority of functionality relating to accessing items in the
+    Collection.
+
+    """
+
+    def __init__(self, parent=None):
+        self.parent = parent
+        self._modified = True
+        self._items = {}
+
+    def _my_api(self):
+        raise NotImplementedError()
+
+    def _my_keep(self):
+        raise NotImplementedError()
+
+    def _my_block_manager(self):
+        raise NotImplementedError()
+
+    def _populate(self):
+        raise NotImplementedError()
+
+    def sync_mode(self):
+        raise NotImplementedError()
+
+    def root_collection(self):
+        raise NotImplementedError()
+
+    def notify(self, event, collection, name, item):
+        raise NotImplementedError()
+
+    @must_be_writable
+    @synchronized
+    def find_or_create(self, path, create_type):
+        """Recursively search the specified file path.
+
+        May return either a `Collection` or `ArvadosFile`.  If not found, will
+        create a new item at the specified path based on `create_type`.  Will
+        create intermediate subcollections needed to contain the final item in
+        the path.
+
+        :create_type:
+          One of `arvados.collection.FILE` or
+          `arvados.collection.COLLECTION`.  If the path is not found and
+          create_type is FILE, create and return a new ArvadosFile for the
+          last path component.  If COLLECTION, create and return a new
+          Collection for the last path component.
+
+        """
+
+        if self.sync_mode() == SYNC_READONLY:
+            raise IOError(errno.EROFS, "Collection is read only")
+
+        pathcomponents = path.split("/")
+        if pathcomponents[0] == '.':
+            del pathcomponents[0]
+
+        if pathcomponents and pathcomponents[0]:
+            item = self._items.get(pathcomponents[0])
+            if len(pathcomponents) == 1:
+                # this is the last path component; create the item if needed
+                if item is None:
+                    # create a new file or subcollection per create_type
+                    if create_type == COLLECTION:
+                        item = Subcollection(self)
+                    else:
+                        item = ArvadosFile(self)
+                    self._items[pathcomponents[0]] = item
+                    self._modified = True
+                    self.notify(ADD, self, pathcomponents[0], item)
+                return item
+            else:
+                if item is None:
+                    # create new collection
+                    item = Subcollection(self)
+                    self._items[pathcomponents[0]] = item
+                    self._modified = True
+                    self.notify(ADD, self, pathcomponents[0], item)
+                del pathcomponents[0]
+                if isinstance(item, SynchronizedCollectionBase):
+                    return item.find_or_create("/".join(pathcomponents), create_type)
+                else:
+                    raise errors.ArgumentError("Interior path components must be subcollection")
+        else:
+            return self
+
+    @synchronized
+    def find(self, path):
+        """Recursively search the specified file path.
+
+        May return either a Collection or ArvadosFile.  Return None if not
+        found.
+
+        """
+        pathcomponents = path.split("/")
+        if pathcomponents[0] == '.':
+            del pathcomponents[0]
+
+        if pathcomponents and pathcomponents[0]:
+            item = self._items.get(pathcomponents[0])
+            if len(pathcomponents) == 1:
+                # last path component; return whatever is here (may be None)
+                return item
+            else:
+                del pathcomponents[0]
+                if isinstance(item, SynchronizedCollectionBase):
+                    return item.find("/".join(pathcomponents))
+                else:
+                    raise errors.ArgumentError("Interior path components must be subcollection")
+        else:
+            return self
+
+    def mkdirs(self, path):
+        """Recursive subcollection create.
+
+        Like `os.makedirs()`.  Will create intermediate subcollections needed to
+        contain the leaf subcollection path.
+
+        """
+        return self.find_or_create(path, COLLECTION)
+
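For example (a sketch; c is a writable Collection):

    f = c.find_or_create('a/b/c.txt', FILE)        # creates ./a/b/ on the way
    d = c.find_or_create('a/b/sub', COLLECTION)    # leaf becomes a Subcollection
    assert c.find('a/b/c.txt') is f
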
+    def open(self, path, mode):
+        """Open a file-like object for access.
+
+        :path:
+          path to a file in the collection
+        :mode:
+          one of "r", "r+", "w", "w+", "a", "a+"
+          :"r":
+            opens for reading
+          :"r+":
+            opens for reading and writing.  Reads/writes share a file pointer.
+          :"w", "w+":
+            truncates to 0 and opens for reading and writing.  Reads/writes share a file pointer.
+          :"a", "a+":
+            opens for reading and writing.  All writes are appended to
+            the end of the file.  Writing does not affect the file pointer for
+            reading.
+        """
+        mode = mode.replace("b", "")
+        if len(mode) == 0 or mode[0] not in ("r", "w", "a"):
+            raise ArgumentError("Bad mode '%s'" % mode)
+        create = (mode != "r")
+
+        if create and self.sync_mode() == SYNC_READONLY:
+            raise IOError(errno.EROFS, "Collection is read only")
+
+        if create:
+            arvfile = self.find_or_create(path, FILE)
+        else:
+            arvfile = self.find(path)
+
+        if arvfile is None:
+            raise IOError(errno.ENOENT, "File not found")
+        if not isinstance(arvfile, ArvadosFile):
+            raise IOError(errno.EISDIR, "Path must refer to a file.")
+
+        if mode[0] == "w":
+            arvfile.truncate(0)
+
+        if mode == "r":
+            return ArvadosFileReader(arvfile, path, mode, num_retries=self.num_retries)
+        else:
+            return ArvadosFileWriter(arvfile, path, mode, num_retries=self.num_retries)
+
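The mode semantics above in brief (a sketch; c is a writable Collection):

    with c.open('log.txt', 'a') as f:   # appends go to end-of-file
        f.write('one more line\n')
    f = c.open('log.txt', 'r')          # returns an ArvadosFileReader
    first_line = f.readline()
    f.close()
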
+    @synchronized
+    def modified(self):
+        """Test if the collection (or any subcollection or file) has been modified
+        since it was created."""
+        if self._modified:
+            return True
+        for k,v in self._items.items():
+            if v.modified():
+                return True
+        return False
+
+    @synchronized
+    def set_unmodified(self):
+        """Recursively clear modified flag."""
+        self._modified = False
+        for k,v in self._items.items():
+            v.set_unmodified()
+
+    @synchronized
+    def __iter__(self):
+        """Iterate over names of files and collections contained in this collection."""
+        return iter(self._items.keys())
+
+    @synchronized
+    def iterkeys(self):
+        """Iterate over names of files and collections directly contained in this collection."""
+        return self._items.keys()
+
+    @synchronized
+    def __getitem__(self, k):
+        """Get a file or collection that is directly contained by this collection.  If
+        you want to search a path, use `find()` instead.
+        """
+        return self._items[k]
+
+    @synchronized
+    def __contains__(self, k):
+        """If there is a file or collection a directly contained by this collection
+        with name `k`."""
+        return k in self._items
+
+    @synchronized
+    def __len__(self):
+        """Get the number of items directly contained in this collection."""
+        return len(self._items)
+
+    @must_be_writable
+    @synchronized
+    def __delitem__(self, p):
+        """Delete an item by name which is directly contained by this collection."""
+        del self._items[p]
+        self._modified = True
+        self.notify(DEL, self, p, None)
+
+    @synchronized
+    def keys(self):
+        """Get a list of names of files and collections directly contained in this collection."""
+        return self._items.keys()
+
+    @synchronized
+    def values(self):
+        """Get a list of files and collection objects directly contained in this collection."""
+        return self._items.values()
+
+    @synchronized
+    def items(self):
+        """Get a list of (name, object) tuples directly contained in this collection."""
+        return self._items.items()
+
+    def exists(self, path):
+        """Test if there is a file or collection at `path`."""
+        return self.find(path) is not None
+
+    @must_be_writable
+    @synchronized
+    def remove(self, path, recursive=False):
+        """Remove the file or subcollection (directory) at `path`.
+
+        :recursive:
+          Specify whether to remove non-empty subcollections (True), or raise an error (False).
+        """
+        pathcomponents = path.split("/")
+        if pathcomponents[0] == '.':
+            # Remove '.' from the front of the path
+            del pathcomponents[0]
+
+        if len(pathcomponents) > 0:
+            item = self._items.get(pathcomponents[0])
+            if item is None:
+                raise IOError(errno.ENOENT, "File not found")
+            if len(pathcomponents) == 1:
+                if isinstance(self._items[pathcomponents[0]], SynchronizedCollectionBase) and len(self._items[pathcomponents[0]]) > 0 and not recursive:
+                    raise IOError(errno.ENOTEMPTY, "Subcollection not empty")
+                deleteditem = self._items[pathcomponents[0]]
+                del self._items[pathcomponents[0]]
+                self._modified = True
+                self.notify(DEL, self, pathcomponents[0], deleteditem)
+            else:
+                del pathcomponents[0]
+                item.remove("/".join(pathcomponents))
+        else:
+            raise IOError(errno.ENOENT, "File not found")
+
+    def _cloneinto(self, target):
+        for k,v in self._items.items():
+            target._items[k] = v.clone(target)
+
+    def clone(self):
+        raise NotImplementedError()
+
+    @must_be_writable
+    @synchronized
+    def copy(self, source, target_path, source_collection=None, overwrite=False):
+        """Copy a file or subcollection to a new path in this collection.
+
+        :source:
+          An ArvadosFile, Subcollection, or string with a path to source file or subcollection
+
+        :target_path:
+          Destination file or path.  If the target path already exists and is a
+          subcollection, the item will be placed inside the subcollection.  If
+          the target path already exists and is a file, this will raise an error
+          unless you specify `overwrite=True`.
+
+        :source_collection:
+          Collection to copy `source` from (default `self`)
+
+        :overwrite:
+          Whether to overwrite target file if it already exists.
+        """
+        if source_collection is None:
+            source_collection = self
+
+        # Find the object to copy
+        if isinstance(source, basestring):
+            source_obj = source_collection.find(source)
+            if source_obj is None:
+                raise IOError(errno.ENOENT, "File not found")
+            sourcecomponents = source.split("/")
+        else:
+            source_obj = source
+            sourcecomponents = None
+
+        # Find the parent collection of the target path
+        targetcomponents = target_path.split("/")
+
+        # Determine the name to use.
+        target_name = targetcomponents[-1] if targetcomponents[-1] else (sourcecomponents[-1] if sourcecomponents else None)
+
+        if not target_name:
+            raise errors.ArgumentError("Target path is empty and source is an object.  Cannot determine destination filename to use.")
+
+        target_dir = self.find_or_create("/".join(targetcomponents[0:-1]), COLLECTION)
+
+        with target_dir.lock:
+            if target_name in target_dir:
+                if isinstance(target_dir[target_name], SynchronizedCollectionBase) and sourcecomponents:
+                    target_dir = target_dir[target_name]
+                    target_name = sourcecomponents[-1]
+                elif not overwrite:
+                    raise IOError(errno.EEXIST, "File already exists")
+
+            modified_from = None
+            if target_name in target_dir:
+                modified_from = target_dir[target_name]
+
+            # Actually make the copy.
+            dup = source_obj.clone(target_dir)
+            target_dir._items[target_name] = dup
+            target_dir._modified = True
+
+        if modified_from:
+            self.notify(MOD, target_dir, target_name, (modified_from, dup))
+        else:
+            self.notify(ADD, target_dir, target_name, dup)
+
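+    # A sketch of `copy` semantics (illustrative; `c` is a writable
+    # collection that already contains "reads.fastq"):
+    #
+    #   c.copy("reads.fastq", "backup/")              # keeps the source name
+    #   c.copy("reads.fastq", "backup/copy.fastq")    # renames the copy
+    #   c.copy("reads.fastq", "backup/copy.fastq", overwrite=True)
+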
+    @synchronized
+    def manifest_text(self, strip=False, normalize=False):
+        """Get the manifest text for this collection, sub collections and files.
+
+        :strip:
+          If True, remove signing tokens from block locators if present.
+          If False, block locators are left unchanged.
+
+        :normalize:
+          If True, always export the manifest text in normalized form
+          even if the Collection is not modified.  If False and the collection
+          is not modified, return the original manifest text even if it is not
+          in normalized form.
+
+        """
+        if self.modified() or self._manifest_text is None or normalize:
+            return export_manifest(self, stream_name=".", portable_locators=strip)
+        else:
+            if strip:
+                return self.stripped_manifest()
+            else:
+                return self._manifest_text
+
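+    # For example (a sketch; `c` is any collection object):
+    #
+    #   signed = c.manifest_text()               # locators keep any signing tokens
+    #   portable = c.manifest_text(strip=True)   # signing tokens removed
+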
+    @synchronized
+    def diff(self, end_collection, prefix=".", holding_collection=None):
+        """
+        Generate list of add/modify/delete actions which, when given to `apply`, will
+        change `self` to match `end_collection`
+        """
+        changes = []
+        if holding_collection is None:
+            holding_collection = Collection(api_client=self._my_api(), keep_client=self._my_keep(), sync=SYNC_EXPLICIT)
+        for k in self:
+            if k not in end_collection:
+                changes.append((DEL, os.path.join(prefix, k), self[k].clone(holding_collection)))
+        for k in end_collection:
+            if k in self:
+                if isinstance(end_collection[k], Subcollection) and isinstance(self[k], Subcollection):
+                    changes.extend(self[k].diff(end_collection[k], os.path.join(prefix, k), holding_collection))
+                elif end_collection[k] != self[k]:
+                    changes.append((MOD, os.path.join(prefix, k), self[k].clone(holding_collection), end_collection[k].clone(holding_collection)))
+            else:
+                changes.append((ADD, os.path.join(prefix, k), end_collection[k].clone(holding_collection)))
+        return changes
+
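+    # A sketch of the diff/apply round trip (illustrative; `mine` and
+    # `theirs` are collection objects with a common ancestor):
+    #
+    #   changes = mine.diff(theirs)   # actions that would make `mine` match `theirs`
+    #   mine.apply(changes)           # conflicting local edits are preserved as
+    #                                 # "name~conflict-<timestamp>~" copies
+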
+    @must_be_writable
+    @synchronized
+    def apply(self, changes):
+        """Apply changes from `diff`.
+
+        If a change conflicts with a local change, it will be saved to an
+        alternate path indicating the conflict.
+
+        """
+        for change in changes:
+            event_type = change[0]
+            path = change[1]
+            initial = change[2]
+            local = self.find(path)
+            conflictpath = "%s~conflict-%s~" % (path, time.strftime("%Y-%m-%d-%H:%M:%S",
+                                                                    time.gmtime()))
+            if event_type == ADD:
+                if local is None:
+                    # No local file at path, safe to copy over new file
+                    self.copy(initial, path)
+                elif local != initial:
+                    # There is already a local file and it is different:
+                    # save change to conflict file.
+                    self.copy(initial, conflictpath)
+            elif event_type == MOD:
+                final = change[3]
+                if local == initial:
+                    # Local matches the "initial" item so it has not
+                    # changed locally and is safe to update.
+                    if isinstance(local, ArvadosFile) and isinstance(final, ArvadosFile):
+                        # Replace contents of local file with new contents
+                        local.replace_contents(final)
+                    else:
+                        # Overwrite path with new item; this can happen if
+                        # path was a file and is now a collection or vice versa
+                        self.copy(final, path, overwrite=True)
+                else:
+                    # Local is missing (presumably deleted) or local doesn't
+                    # match the "start" value, so save change to conflict file
+                    self.copy(final, conflictpath)
+            elif event_type == DEL:
+                if local == initial:
+                    # Local item matches "initial" value, so it is safe to remove.
+                    self.remove(path, recursive=True)
+                # else, the file is modified or already removed, in either
+                # case we don't want to try to remove it.
+
+    def portable_data_hash(self):
+        """Get the portable data hash for this collection's manifest."""
+        stripped = self.manifest_text(strip=True)
+        return hashlib.md5(stripped).hexdigest() + '+' + str(len(stripped))
+
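+    # For instance (a sketch): a brand-new empty collection exports an empty
+    # manifest, so its portable data hash is the md5 of "" plus the length:
+    #
+    #   c = WritableCollection()
+    #   c.portable_data_hash()   # -> "d41d8cd98f00b204e9800998ecf8427e+0"
+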
+    @synchronized
+    def __eq__(self, other):
+        if other is self:
+            return True
+        if not isinstance(other, SynchronizedCollectionBase):
+            return False
+        if len(self._items) != len(other):
+            return False
+        for k in self._items:
+            if k not in other:
+                return False
+            if self._items[k] != other[k]:
+                return False
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+class Collection(SynchronizedCollectionBase):
+    """Represents the root of an Arvados Collection, which may be associated with
+    an API server Collection record.
+
+    Brief summary of useful methods:
+
+    :To read an existing file:
+      `c.open("myfile", "r")`
+
+    :To write a new file:
+      `c.open("myfile", "w")`
+
+    :To determine if a file exists:
+      `c.exists("myfile")`
+
+    :To copy a file:
+      `c.copy("source", "dest")`
+
+    :To delete a file:
+      `c.remove("myfile")`
+
+    :To save to an existing collection record:
+      `c.save()`
+
+    :To save a new collection record:
+      `c.save_new()`
+
+    :To merge remote changes into this object:
+      `c.update()`
+
+    This class is threadsafe.  The root collection object, all subcollections
+    and files are protected by a single lock (i.e. each access locks the entire
+    collection).
+
+    """
+
+    def __init__(self, manifest_locator_or_text=None,
+                 parent=None,
+                 apiconfig=None,
+                 api_client=None,
+                 keep_client=None,
+                 num_retries=None,
+                 block_manager=None,
+                 sync=None):
+        """Collection constructor.
+
+        :manifest_locator_or_text:
+          One of: an Arvados collection UUID, the block locator of a manifest,
+          raw manifest text, or None (to create an empty collection).
+        :parent:
+          The parent Collection, may be None.
+        :apiconfig:
+          A dict containing keys for ARVADOS_API_HOST and ARVADOS_API_TOKEN.
+          Prefer this over supplying your own api_client and keep_client (except in testing).
+          Will use default config settings if not specified.
+        :api_client:
+          The API client object to use for requests.  If not specified, create one using `apiconfig`.
+        :keep_client:
+          The Keep client to use for requests.  If not specified, create one using `apiconfig`.
+        :num_retries:
+          The number of retries for API and Keep requests.
+        :block_manager:
+          The block manager to use.  If not specified, create one.
+        :sync:
+          Set synchronization policy with API server collection record.
+          :SYNC_READONLY:
+            Collection is read only.  No synchronization.  This mode will
+            also forego locking, which gives better performance.
+          :SYNC_EXPLICIT:
+            Collection is writable.  Synchronize on explicit request via `update()` or `save()`
+          :SYNC_LIVE:
+            Collection is writable.  Synchronize with server in response to
+            background websocket events, on block write, or on file close.
+
+        """
+        super(Collection, self).__init__(parent)
+        self._api_client = api_client
+        self._keep_client = keep_client
+        self._block_manager = block_manager
+
+        if apiconfig:
+            self._config = apiconfig
+        else:
+            self._config = config.settings()
+
+        self.num_retries = num_retries if num_retries is not None else 2
+        self._manifest_locator = None
+        self._manifest_text = None
+        self._api_response = None
+
+        if sync is None:
+            raise errors.ArgumentError("Must specify sync mode")
+
+        self._sync = sync
+        self.lock = threading.RLock()
+        self.callbacks = []
+        self.events = None
+
+        if manifest_locator_or_text:
+            if re.match(util.keep_locator_pattern, manifest_locator_or_text):
+                self._manifest_locator = manifest_locator_or_text
+            elif re.match(util.collection_uuid_pattern, manifest_locator_or_text):
+                self._manifest_locator = manifest_locator_or_text
+            elif re.match(util.manifest_pattern, manifest_locator_or_text):
+                self._manifest_text = manifest_locator_or_text
+            else:
+                raise errors.ArgumentError(
+                    "Argument to CollectionReader must be a manifest or a collection UUID")
+
+            self._populate()
+            self._subscribe_events()
+
+
+    def root_collection(self):
+        return self
+
+    def sync_mode(self):
+        return self._sync
+
+    def _subscribe_events(self):
+        if self._sync == SYNC_LIVE and self.events is None:
+            if not self._has_collection_uuid():
+                raise errors.ArgumentError("Cannot SYNC_LIVE associated with a collection uuid")
+            self.events = events.subscribe(arvados.api(apiconfig=self._config),
+                                           [["object_uuid", "=", self._manifest_locator]],
+                                           self.on_message)
+
+    def on_message(self, event):
+        if event.get("object_uuid") == self._manifest_locator:
+            self.update()
+
+    @synchronized
+    @retry_method
+    def update(self, other=None, num_retries=None):
+        """Fetch the latest collection record on the API server and merge it with the
+        current collection contents.
+
+        """
+        if other is None:
+            if self._manifest_locator is None:
+                raise errors.ArgumentError("`other` is None but collection does not have a manifest_locator uuid")
+            response = self._my_api().collections().get(uuid=self._manifest_locator).execute(num_retries=num_retries)
+            other = import_manifest(response["manifest_text"])
+        baseline = import_manifest(self._manifest_text)
+        self.apply(baseline.diff(other))
+
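+    # A sketch (illustrative; `uuid` names an existing collection record):
+    #
+    #   c = WritableCollection(uuid)   # SYNC_EXPLICIT
+    #   # ...the record changes on the API server...
+    #   c.update()                     # merge the remote changes into `c`
+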
+    @synchronized
+    def _my_api(self):
+        if self._api_client is None:
+            self._api_client = ThreadSafeApiCache(self._config)
+            self._keep_client = self._api_client.keep
+        return self._api_client
+
+    @synchronized
+    def _my_keep(self):
+        if self._keep_client is None:
+            if self._api_client is None:
+                self._my_api()
+            else:
+                self._keep_client = KeepClient(api=self._api_client)
+        return self._keep_client
+
+    @synchronized
+    def _my_block_manager(self):
+        if self._block_manager is None:
+            self._block_manager = BlockManager(self._my_keep())
+        return self._block_manager
+
+    def _populate_from_api_server(self):
+        # As in KeepClient itself, we must wait until the last
+        # possible moment to instantiate an API client, in order to
+        # avoid tripping up clients that don't have access to an API
+        # server.  If we do build one, make sure our Keep client uses
+        # it.  If instantiation fails, we'll fall back to the except
+        # clause, just like any other Collection lookup
+        # failure. Return an exception, or None if successful.
+        try:
+            self._api_response = self._my_api().collections().get(
+                uuid=self._manifest_locator).execute(
+                    num_retries=self.num_retries)
+            self._manifest_text = self._api_response['manifest_text']
+            return None
+        except Exception as e:
+            return e
+
+    def _populate_from_keep(self):
+        # Retrieve a manifest directly from Keep. This has a chance of
+        # working if [a] the locator includes a permission signature
+        # or [b] the Keep services are operating in world-readable
+        # mode. Return an exception, or None if successful.
+        try:
+            self._manifest_text = self._my_keep().get(
+                self._manifest_locator, num_retries=self.num_retries)
+        except Exception as e:
+            return e
+
+    def _populate(self):
+        if self._manifest_locator is None and self._manifest_text is None:
+            return
+        error_via_api = None
+        error_via_keep = None
+        should_try_keep = ((self._manifest_text is None) and
+                           util.keep_locator_pattern.match(
+                               self._manifest_locator))
+        if ((self._manifest_text is None) and
+            util.signed_locator_pattern.match(self._manifest_locator)):
+            error_via_keep = self._populate_from_keep()
+        if self._manifest_text is None:
+            error_via_api = self._populate_from_api_server()
+            if error_via_api is not None and not should_try_keep:
+                raise error_via_api
+        if ((self._manifest_text is None) and
+            not error_via_keep and
+            should_try_keep):
+            # Looks like a keep locator, and we didn't already try keep above
+            error_via_keep = self._populate_from_keep()
+        if self._manifest_text is None:
+            # Nothing worked!
+            raise arvados.errors.NotFoundError(
+                ("Failed to retrieve collection '{}' " +
+                 "from either API server ({}) or Keep ({})."
+                 ).format(
+                    self._manifest_locator,
+                    error_via_api,
+                    error_via_keep))
+        # Populate the object's contents from the fetched manifest.
+        self._baseline_manifest = self._manifest_text
+        import_manifest(self._manifest_text, self)
+
+        if self._sync == SYNC_READONLY:
+            # Now that we're populated, knowing that this will be readonly,
+            # forego any further locking.
+            self.lock = NoopLock()
+
+    def _has_collection_uuid(self):
+        return self._manifest_locator is not None and re.match(util.collection_uuid_pattern, self._manifest_locator)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        """Support scoped auto-commit in a with: block."""
+        if self._sync != SYNC_READONLY and self._has_collection_uuid():
+            self.save()
+        if self._block_manager is not None:
+            self._block_manager.stop_threads()
+
+    @synchronized
+    def clone(self, new_parent=None, new_sync=SYNC_READONLY, new_config=None):
+        if new_config is None:
+            new_config = self._config
+        newcollection = Collection(parent=new_parent, apiconfig=new_config, sync=SYNC_EXPLICIT)
+        if new_sync == SYNC_READONLY:
+            newcollection.lock = NoopLock()
+        self._cloneinto(newcollection)
+        newcollection._sync = new_sync
+        return newcollection
+
+    @synchronized
+    def api_response(self):
+        """Returns information about this Collection fetched from the API server.
+
+        If the Collection exists in Keep but not the API server, currently
+        returns None.  Future versions may provide a synthetic response.
+
+        """
+        return self._api_response
+
+    @must_be_writable
+    @synchronized
+    @retry_method
+    def save(self, merge=True, num_retries=None):
+        """Commit pending buffer blocks to Keep, merge with remote record (if
+        update=True), write the manifest to Keep, and update the collection
+        record.
+
+        Will raise AssertionError if not associated with a collection record on
+        the API server.  If you want to save a manifest to Keep only, see
+        `save_new()`.
+
+        :update:
+          Update and merge remote changes before saving.  Otherwise, any
+          remote changes will be ignored and overwritten.
+
+        """
+        if self.modified():
+            if not self._has_collection_uuid():
+                raise AssertionError("Collection manifest_locator must be a collection uuid.  Use save_as() for new collections.")
+            self._my_block_manager().commit_all()
+            if merge:
+                self.update()
+            self._my_keep().put(self.manifest_text(strip=True), num_retries=num_retries)
+
+            text = self.manifest_text(strip=False)
+            self._api_response = self._my_api().collections().update(
+                uuid=self._manifest_locator,
+                body={'manifest_text': text}
+                ).execute(
+                    num_retries=num_retries)
+            self._manifest_text = text
+            self.set_unmodified()
+
+    @must_be_writable
+    @synchronized
+    @retry_method
+    def save_new(self, name=None, create_collection_record=True, owner_uuid=None, ensure_unique_name=False, num_retries=None):
+        """Commit pending buffer blocks to Keep, write the manifest to Keep, and create
+        a new collection record (if create_collection_record True).
+
+        After creating a new collection record, this Collection object will be
+        associated with the new record for `save()` and SYNC_LIVE updates.
+
+        :name:
+          The collection name.
+
+        :keep_only:
+          Only save the manifest to keep, do not create a collection record.
+
+        :owner_uuid:
+          the user, or project uuid that will own this collection.
+          If None, defaults to the current user.
+
+        :ensure_unique_name:
+          If True, ask the API server to rename the collection
+          if it conflicts with a collection with the same name and owner.  If
+          False, a name conflict will result in an error.
+
+        """
+        self._my_block_manager().commit_all()
+        self._my_keep().put(self.manifest_text(strip=True), num_retries=num_retries)
+        text = self.manifest_text(strip=False)
+
+        if create_collection_record:
+            if name is None:
+                name = "Collection created %s" % (time.strftime("%Y-%m-%d %H:%M:%S %Z", time.localtime()))
+
+            body = {"manifest_text": text,
+                    "name": name}
+            if owner_uuid:
+                body["owner_uuid"] = owner_uuid
+
+            self._api_response = self._my_api().collections().create(ensure_unique_name=ensure_unique_name, body=body).execute(num_retries=num_retries)
+
+            if self.events:
+                self.events.unsubscribe(filters=[["object_uuid", "=", self._manifest_locator]])
+
+            self._manifest_locator = self._api_response["uuid"]
+
+            if self.events:
+                self.events.subscribe(filters=[["object_uuid", "=", self._manifest_locator]])
+
+        self._manifest_text = text
+        self.set_unmodified()
+
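+    # A minimal end-to-end sketch (illustrative names; error handling
+    # omitted):
+    #
+    #   c = WritableCollection()
+    #   f = c.open("hello.txt", "w")
+    #   f.write("hello world")
+    #   c.save_new(name="example collection", ensure_unique_name=True)
+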
+    @synchronized
+    def subscribe(self, callback):
+        self.callbacks.append(callback)
+
+    @synchronized
+    def unsubscribe(self, callback):
+        self.callbacks.remove(callback)
+
+    @synchronized
+    def notify(self, event, collection, name, item):
+        for c in self.callbacks:
+            c(event, collection, name, item)
+
+def ReadOnlyCollection(*args, **kwargs):
+    """Create a read-only collection object from an api collection record locator,
+    a portable data hash of a manifest, or raw manifest text.
+
+    See `Collection` constructor for detailed options.
+
+    """
+    kwargs["sync"] = SYNC_READONLY
+    return Collection(*args, **kwargs)
+
+def WritableCollection(*args, **kwargs):
+    """Create a writable collection object from an api collection record locator,
+    a portable data hash of a manifest, or raw manifest text.
+
+    See `Collection` constructor for detailed options.
+
+    """
+
+    kwargs["sync"] = SYNC_EXPLICIT
+    return Collection(*args, **kwargs)
+
+def LiveCollection(*args, **kwargs):
+    """Create a writable, live updating collection object representing an existing
+    collection record on the API server.
+
+    See `Collection` constructor for detailed options.
+
+    """
+    kwargs["sync"] = SYNC_LIVE
+    return Collection(*args, **kwargs)
+
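+# How the factory helpers map onto sync modes (a sketch; the locator and
+# uuid values below are placeholders):
+#
+#   snapshot = ReadOnlyCollection("d41d8cd98f00b204e9800998ecf8427e+0")   # SYNC_READONLY
+#   draft = WritableCollection(manifest_text)                            # SYNC_EXPLICIT
+#   live = LiveCollection("zzzzz-4zz18-0123456789abcde")                 # SYNC_LIVE
+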
+def createWritableCollection(name, owner_uuid=None, apiconfig=None):
+    """Create an empty, writable collection object and create an associated api
+    collection record.
+
+    :name:
+      The collection name
+
+    :owner_uuid:
+      The parent project.
+
+    :apiconfig:
+      Optional alternate api configuration to use (to specify an API host
+      or token other than the default).
+
+    """
+    newcollection = Collection(sync=SYNC_EXPLICIT, apiconfig=apiconfig)
+    newcollection.save_new(name, owner_uuid=owner_uuid, ensure_unique_name=True)
+    return newcollection
+
+def createLiveCollection(name, owner_uuid=None, apiconfig=None):
+    """Create an empty, writable, live updating Collection object and create an
+    associated collection record on the API server.
+
+    :name:
+      The collection name
+
+    :owner_uuid:
+      The parent project.
+
+    :apiconfig:
+      Optional alternate api configuration to use (to specify an API host
+      or token other than the default).
+
+    """
+    newcollection = Collection(sync=SYNC_EXPLICIT, apiconfig=apiconfig)
+    newcollection.save_new(name, owner_uuid=owner_uuid, ensure_unique_name=True)
+    newcollection._sync = SYNC_LIVE
+    newcollection._subscribe_events()
+    return newcollection
+
+class Subcollection(SynchronizedCollectionBase):
+    """This is a subdirectory within a collection that doesn't have its own API
+    server record.
+
+    It falls under the umbrella of the root collection.
+
+    """
+
+    def __init__(self, parent):
+        super(Subcollection, self).__init__(parent)
+        self.lock = self.root_collection().lock
+
+    def root_collection(self):
+        return self.parent.root_collection()
+
+    def sync_mode(self):
+        return self.root_collection().sync_mode()
+
+    def _my_api(self):
+        return self.root_collection()._my_api()
+
+    def _my_keep(self):
+        return self.root_collection()._my_keep()
+
+    def _my_block_manager(self):
+        return self.root_collection()._my_block_manager()
+
+    def _populate(self):
+        self.root_collection()._populate()
+
+    def notify(self, event, collection, name, item):
+        return self.root_collection().notify(event, collection, name, item)
+
+    @synchronized
+    def clone(self, new_parent):
+        c = Subcollection(new_parent)
+        self._cloneinto(c)
+        return c
+
+def import_manifest(manifest_text,
+                    into_collection=None,
+                    api_client=None,
+                    keep=None,
+                    num_retries=None,
+                    sync=SYNC_READONLY):
+    """Import a manifest into a `Collection`.
+
+    :manifest_text:
+      The manifest text to import from.
+
+    :into_collection:
+      The `Collection` that will be initialized (must be empty).
+      If None, create a new `Collection` object.
+
+    :api_client:
+      The API client object that will be used when creating a new `Collection` object.
+
+    :keep:
+      The keep client object that will be used when creating a new `Collection` object.
+
+    :num_retries:
+      the default number of api client and keep retries on error.
+
+    :sync:
+      Collection sync mode (only if into_collection is None)
+    """
+    if into_collection is not None:
+        if len(into_collection) > 0:
+            raise errors.ArgumentError("Can only import manifest into an empty collection")
+    else:
+        into_collection = Collection(api_client=api_client, keep_client=keep, num_retries=num_retries, sync=sync)
+
+    save_sync = into_collection.sync_mode()
+    into_collection._sync = None
+
+    STREAM_NAME = 0
+    BLOCKS = 1
+    SEGMENTS = 2
+
+    stream_name = None
+    state = STREAM_NAME
+
+    for n in re.finditer(r'(\S+)(\s+|$)', manifest_text):
+        tok = n.group(1)
+        sep = n.group(2)
+
+        if state == STREAM_NAME:
+            # starting a new stream
+            stream_name = tok.replace('\\040', ' ')
+            blocks = []
+            segments = []
+            streamoffset = 0L
+            state = BLOCKS
+            continue
+
+        if state == BLOCKS:
+            s = re.match(r'[0-9a-f]{32}\+(\d+)(\+\S+)*', tok)
+            if s:
+                blocksize = long(s.group(1))
+                blocks.append(Range(tok, streamoffset, blocksize))
+                streamoffset += blocksize
+            else:
+                state = SEGMENTS
+
+        if state == SEGMENTS:
+            s = re.search(r'^(\d+):(\d+):(\S+)', tok)
+            if s:
+                pos = long(s.group(1))
+                size = long(s.group(2))
+                name = s.group(3).replace('\\040', ' ')
+                f = into_collection.find_or_create("%s/%s" % (stream_name, name), FILE)
+                f.add_segment(blocks, pos, size)
+            else:
+                # error!
+                raise errors.SyntaxError("Invalid manifest format")
+
+        if sep == "\n":
+            stream_name = None
+            state = STREAM_NAME
+
+    into_collection.set_unmodified()
+    into_collection._sync = save_sync
+    return into_collection
+
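+# A sketch of importing a manifest (self-contained, since it only references
+# the well-known empty block locator):
+#
+#   m = ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:empty.txt\n"
+#   c = import_manifest(m)    # read-only by default
+#   c.exists("empty.txt")     # -> True
+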
+def export_manifest(item, stream_name=".", portable_locators=False):
+    """Export a manifest from the contents of a SynchronizedCollectionBase.
+
+    :item:
+      Create a manifest for `item` (must be a `SynchronizedCollectionBase` or `ArvadosFile`).  If
+      `item` is a `Collection`, this will also export subcollections.
+
+    :stream_name:
+      the name of the stream when exporting `item`.
+
+    :portable_locators:
+      If True, strip any permission hints on block locators.
+      If False, use block locators as-is.
+
+    """
+    buf = ""
+    if isinstance(item, SynchronizedCollectionBase):
+        stream = {}
+        sorted_keys = sorted(item.keys())
+        for filename in [s for s in sorted_keys if isinstance(item[s], ArvadosFile)]:
+            # Collect this file's segments into the stream
+            arvfile = item[filename]
+            filestream = []
+            for segment in arvfile.segments():
+                loc = segment.locator
+                if loc.startswith("bufferblock"):
+                    loc = arvfile.parent._my_block_manager()._bufferblocks[loc].locator()
+                if portable_locators:
+                    loc = KeepLocator(loc).stripped()
+                filestream.append(LocatorAndRange(loc, locator_block_size(loc),
+                                     segment.segment_offset, segment.range_size))
+            stream[filename] = filestream
+        if stream:
+            buf += ' '.join(normalize_stream(stream_name, stream))
+            buf += "\n"
+        for dirname in [s for s in sorted_keys if isinstance(item[s], SynchronizedCollectionBase)]:
+            buf += export_manifest(item[dirname], stream_name=os.path.join(stream_name, dirname), portable_locators=portable_locators)
+    elif isinstance(item, ArvadosFile):
+        stream = {}
+        filestream = []
+        for segment in item.segments():
+            loc = segment.locator
+            if loc.startswith("bufferblock"):
+                loc = item.parent._my_block_manager()._bufferblocks[loc].locator()
+            if portable_locators:
+                loc = KeepLocator(loc).stripped()
+            filestream.append(LocatorAndRange(loc, locator_block_size(loc),
+                                 segment.segment_offset, segment.range_size))
+        stream[stream_name] = filestream
+        buf += ' '.join(normalize_stream(stream_name, stream))
+        buf += "\n"
+    return buf
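+
+# Re-exporting an imported collection should yield the normalized manifest
+# text again; a round-trip sketch (illustrative):
+#
+#   c = import_manifest(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:empty.txt\n")
+#   export_manifest(c)   # -> the same manifest text, in normalized form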
index f556e7ecb598eb0381400709d7edd272c76c7be3..d8ed90bda007525630a394b259b9528e845b62cc 100644 (file)
@@ -301,12 +301,12 @@ class ArvPutCollectionWriter(arvados.ResumableCollectionWriter):
 
     def flush_data(self):
         start_buffer_len = self._data_buffer_len
-        start_block_count = self.bytes_written / self.KEEP_BLOCK_SIZE
+        start_block_count = self.bytes_written / arvados.config.KEEP_BLOCK_SIZE
         super(ArvPutCollectionWriter, self).flush_data()
         if self._data_buffer_len < start_buffer_len:  # We actually PUT data.
             self.bytes_written += (start_buffer_len - self._data_buffer_len)
             self.report_progress()
-            if (self.bytes_written / self.KEEP_BLOCK_SIZE) > start_block_count:
+            if (self.bytes_written / arvados.config.KEEP_BLOCK_SIZE) > start_block_count:
                 self.cache_state()
 
     def _record_new_input(self, input_type, source_name, dest_name):
index a0c3cc64ae1306b16ff65feb5ca9b1b210f47c7d..8f2b2654ad1b73f7720415943c09c60f0d575791 100644 (file)
@@ -12,6 +12,7 @@ if os.environ.get('HOME') is not None:
 else:
     default_config_file = ''
 
+KEEP_BLOCK_SIZE = 2**26
 EMPTY_BLOCK_LOCATOR = 'd41d8cd98f00b204e9800998ecf8427e+0'
 
 def initialize(config_file=default_config_file):
@@ -41,8 +42,10 @@ def load(config_file):
             cfg[var] = val
     return cfg
 
-def flag_is_true(key):
-    return get(key, '').lower() in set(['1', 't', 'true', 'y', 'yes'])
+def flag_is_true(key, d=None):
+    if d is None:
+        d = settings()
+    return d.get(key, '').lower() in set(['1', 't', 'true', 'y', 'yes'])
 
 def get(key, default_val=None):
     return settings().get(key, default_val)
index 262e68864db7a7e12847a138de9922c489f473e2..f59ec710c49bf3a1b23054b679cf1898b098ed14 100644 (file)
@@ -58,6 +58,9 @@ class KeepLocator(object):
                              self.permission_hint()] + self.hints
             if s is not None)
 
+    def stripped(self):
+        return "%s+%i" % (self.md5sum, self.size)
+
     def _make_hex_prop(name, length):
         # Build and return a new property with the given name that
         # must be a hex string of the given length.
@@ -171,8 +174,7 @@ class KeepBlockCache(object):
 
     def cap_cache(self):
         '''Cap the cache size to self.cache_max'''
-        self._cache_lock.acquire()
-        try:
+        with self._cache_lock:
             # Select all slots except those where ready.is_set() and content is
             # None (that means there was an error reading the block).
             self._cache = [c for c in self._cache if not (c.ready.is_set() and c.content is None)]
@@ -183,30 +185,35 @@ class KeepBlockCache(object):
                         del self._cache[i]
                         break
                 sm = sum([slot.size() for slot in self._cache])
-        finally:
-            self._cache_lock.release()
+
+    def _get(self, locator):
+        # Test if the locator is already in the cache
+        for i in xrange(0, len(self._cache)):
+            if self._cache[i].locator == locator:
+                n = self._cache[i]
+                if i != 0:
+                    # move it to the front
+                    del self._cache[i]
+                    self._cache.insert(0, n)
+                return n
+        return None
+
+    def get(self, locator):
+        with self._cache_lock:
+            return self._get(locator)
 
     def reserve_cache(self, locator):
         '''Reserve a cache slot for the specified locator,
         or return the existing slot.'''
-        self._cache_lock.acquire()
-        try:
-            # Test if the locator is already in the cache
-            for i in xrange(0, len(self._cache)):
-                if self._cache[i].locator == locator:
-                    n = self._cache[i]
-                    if i != 0:
-                        # move it to the front
-                        del self._cache[i]
-                        self._cache.insert(0, n)
-                    return n, False
-
-            # Add a new cache slot for the locator
-            n = KeepBlockCache.CacheSlot(locator)
-            self._cache.insert(0, n)
-            return n, True
-        finally:
-            self._cache_lock.release()
+        with self._cache_lock:
+            n = self._get(locator)
+            if n:
+                return n, False
+            else:
+                # Add a new cache slot for the locator
+                n = KeepBlockCache.CacheSlot(locator)
+                self._cache.insert(0, n)
+                return n, True
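+
+    # A sketch of the reserve/fill protocol (illustrative; `fetch_block()` is
+    # hypothetical, and slot.set()/slot.get() are assumed from the CacheSlot
+    # API used elsewhere in this module):
+    #
+    #   slot, first = cache.reserve_cache(md5_hash)
+    #   if first:
+    #       slot.set(fetch_block())   # fill the slot; readers wait on slot.ready
+    #   else:
+    #       data = slot.get()         # returns once the slot is filled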
 
 class KeepClient(object):
 
@@ -279,10 +286,11 @@ class KeepClient(object):
         HTTP_ERRORS = (requests.exceptions.RequestException,
                        socket.error, ssl.SSLError)
 
-        def __init__(self, root, **headers):
+        def __init__(self, root, session, **headers):
             self.root = root
             self.last_result = None
             self.success_flag = None
+            self.session = session
             self.get_headers = {'Accept': 'application/octet-stream'}
             self.get_headers.update(headers)
             self.put_headers = headers
@@ -305,7 +313,7 @@ class KeepClient(object):
             _logger.debug("Request: GET %s", url)
             try:
                 with timer.Timer() as t:
-                    result = requests.get(url.encode('utf-8'),
+                    result = self.session.get(url.encode('utf-8'),
                                           headers=self.get_headers,
                                           timeout=timeout)
             except self.HTTP_ERRORS as e:
@@ -318,7 +326,7 @@ class KeepClient(object):
                 content = result.content
                 _logger.info("%s response: %s bytes in %s msec (%.3f MiB/sec)",
                              self.last_status(), len(content), t.msecs,
-                             (len(content)/(1024.0*1024))/t.secs)
+                             (len(content)/(1024.0*1024))/t.secs if t.secs > 0 else 0)
                 if self.success_flag:
                     resp_md5 = hashlib.md5(content).hexdigest()
                     if resp_md5 == locator.md5sum:
@@ -331,7 +339,7 @@ class KeepClient(object):
             url = self.root + hash_s
             _logger.debug("Request: PUT %s", url)
             try:
-                result = requests.put(url.encode('utf-8'),
+                result = self.session.put(url.encode('utf-8'),
                                       data=body,
                                       headers=self.put_headers,
                                       timeout=timeout)
@@ -371,9 +379,10 @@ class KeepClient(object):
         def run_with_limiter(self, limiter):
             if self.service.finished():
                 return
-            _logger.debug("KeepWriterThread %s proceeding %s %s",
+            _logger.debug("KeepWriterThread %s proceeding %s+%i %s",
                           str(threading.current_thread()),
                           self.args['data_hash'],
+                          len(self.args['data']),
                           self.args['service_root'])
             self._success = bool(self.service.put(
                 self.args['data_hash'],
@@ -382,9 +391,10 @@ class KeepClient(object):
             status = self.service.last_status()
             if self._success:
                 result = self.service.last_result
-                _logger.debug("KeepWriterThread %s succeeded %s %s",
+                _logger.debug("KeepWriterThread %s succeeded %s+%i %s",
                               str(threading.current_thread()),
                               self.args['data_hash'],
+                              len(self.args['data']),
                               self.args['service_root'])
                 # Tick the 'done' counter for the number of replica
                 # reported stored by the server, for the case that
@@ -404,7 +414,7 @@ class KeepClient(object):
     def __init__(self, api_client=None, proxy=None,
                  timeout=DEFAULT_TIMEOUT, proxy_timeout=DEFAULT_PROXY_TIMEOUT,
                  api_token=None, local_store=None, block_cache=None,
-                 num_retries=0):
+                 num_retries=0, session=None):
         """Initialize a new KeepClient.
 
         Arguments:
@@ -462,6 +472,7 @@ class KeepClient(object):
             self.put = self.local_store_put
         else:
             self.num_retries = num_retries
+            self.session = session if session is not None else requests.Session()
             if proxy:
                 if not proxy.endswith('/'):
                     proxy += '/'
@@ -553,7 +564,7 @@ class KeepClient(object):
         local_roots = self.weighted_service_roots(md5_s, force_rebuild)
         for root in local_roots:
             if root not in roots_map:
-                roots_map[root] = self.KeepService(root, **headers)
+                roots_map[root] = self.KeepService(root, self.session, **headers)
         return local_roots
 
     @staticmethod
@@ -574,6 +585,14 @@ class KeepClient(object):
         else:
             return None
 
+    def get_from_cache(self, loc):
+        """Fetch a block only if it is already in the cache, otherwise return None."""
+        slot = self.block_cache.get(loc)
+        if slot is not None and slot.ready.is_set():
+            return slot.get()
+        else:
+            return None
+
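+    # A sketch of the intended use (illustrative; `kc` is a KeepClient and
+    # `loc` matches a previously reserved cache slot):
+    #
+    #   data = kc.get_from_cache(loc)
+    #   if data is None:
+    #       data = kc.get(loc)   # fall back to a network fetch
+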
     @retry.retry_method
     def get(self, loc_s, num_retries=None):
         """Get data from Keep.
@@ -599,7 +618,6 @@ class KeepClient(object):
             return ''.join(self.get(x) for x in loc_s.split(','))
         locator = KeepLocator(loc_s)
         expect_hash = locator.md5sum
-
         slot, first = self.block_cache.reserve_cache(expect_hash)
         if not first:
             v = slot.get()
@@ -613,7 +631,7 @@ class KeepClient(object):
         hint_roots = ['http://keep.{}.arvadosapi.com/'.format(hint[2:])
                       for hint in locator.hints if hint.startswith('K@')]
         # Map root URLs their KeepService objects.
-        roots_map = {root: self.KeepService(root) for root in hint_roots}
+        roots_map = {root: self.KeepService(root, self.session) for root in hint_roots}
         blob = None
         loop = retry.RetryLoop(num_retries, self._check_loop_result,
                                backoff_start=2)
@@ -767,3 +785,6 @@ class KeepClient(object):
             return ''
         with open(os.path.join(self.local_store, locator.md5sum), 'r') as f:
             return f.read()
+
+    def is_cached(self, locator):
+        return self.block_cache.get(locator) is not None
diff --git a/sdk/python/arvados/ranges.py b/sdk/python/arvados/ranges.py
new file mode 100644 (file)
index 0000000..2a08b3b
--- /dev/null
@@ -0,0 +1,217 @@
+import logging
+
+_logger = logging.getLogger('arvados.ranges')
+
+class Range(object):
+    def __init__(self, locator, range_start, range_size, segment_offset=0):
+        self.locator = locator
+        self.range_start = range_start
+        self.range_size = range_size
+        self.segment_offset = segment_offset
+
+    def __repr__(self):
+        return "Range(\"%s\", %i, %i, %i)" % (self.locator, self.range_start, self.range_size, self.segment_offset)
+
+    def __eq__(self, other):
+        return (self.locator == other.locator and
+                self.range_start == other.range_start and
+                self.range_size == other.range_size and
+                self.segment_offset == other.segment_offset)
+
+def first_block(data_locators, range_start, range_size):
+    if not data_locators:
+        return None
+
+    # range_start/block_start is the inclusive lower bound
+    # range_end/block_end is the exclusive upper bound
+
+    hi = len(data_locators)
+    lo = 0
+    i = int((hi + lo) / 2)
+    block_size = data_locators[i].range_size
+    block_start = data_locators[i].range_start
+    block_end = block_start + block_size
+
+    # perform a binary search for the first block
+    # assumes that all of the blocks are contiguous, so range_start is guaranteed
+    # to either fall into the range of a block or be outside the block range entirely
+    while not (range_start >= block_start and range_start < block_end):
+        if lo == i:
+            # must be out of range, fail
+            return None
+        if range_start > block_start:
+            lo = i
+        else:
+            hi = i
+        i = int((hi + lo) / 2)
+        block_size = data_locators[i].range_size
+        block_start = data_locators[i].range_start
+        block_end = block_start + block_size
+
+    return i
+
+class LocatorAndRange(object):
+    def __init__(self, locator, block_size, segment_offset, segment_size):
+        self.locator = locator
+        self.block_size = block_size
+        self.segment_offset = segment_offset
+        self.segment_size = segment_size
+
+    def __eq__(self, other):
+        return  (self.locator == other.locator and
+                 self.block_size == other.block_size and
+                 self.segment_offset == other.segment_offset and
+                 self.segment_size == other.segment_size)
+
+    def __repr__(self):
+        return "LocatorAndRange(\"%s\", %i, %i, %i)" % (self.locator, self.block_size, self.segment_offset, self.segment_size)
+
+def locators_and_ranges(data_locators, range_start, range_size):
+    '''Get blocks that are covered by the range and return list of LocatorAndRange
+    objects.
+
+    :data_locators:
+      list of Range objects, assumes that blocks are in order and contiguous
+
+    :range_start:
+      start of range
+
+    :range_size:
+      size of range
+
+    '''
+    if range_size == 0:
+        return []
+    resp = []
+    range_start = long(range_start)
+    range_size = long(range_size)
+    range_end = range_start + range_size
+
+    i = first_block(data_locators, range_start, range_size)
+    if i is None:
+        return []
+
+    while i < len(data_locators):
+        dl = data_locators[i]
+        block_start = dl.range_start
+        block_size = dl.range_size
+        block_end = block_start + block_size
+        _logger.debug("%s range_start %s block_start %s range_end %s block_end %s",
+                      dl.locator, range_start, block_start, range_end, block_end)
+        if range_end <= block_start:
+            # range ends before this block starts, so don't look at any more locators
+            break
+
+        #if range_start >= block_end:
+            # Range starts after this block ends, so go to next block.
+            # We should always start at the first block due to the binary
+            # search above, so this test is unnecessary but useful to help
+            # document the algorithm.
+            #next
+
+        if range_start >= block_start and range_end <= block_end:
+            # range starts and ends in this block
+            resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset + (range_start - block_start), range_size))
+        elif range_start >= block_start and range_end > block_end:
+            # range starts in this block
+            resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset + (range_start - block_start), block_end - range_start))
+        elif range_start < block_start and range_end > block_end:
+            # range starts in a previous block and extends to further blocks
+            resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset, block_size))
+        elif range_start < block_start and range_end <= block_end:
+            # range starts in a previous block and ends in this block
+            resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset, range_end - block_start))
+        block_start = block_end
+        i += 1
+    return resp
+
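+# A worked sketch (hypothetical locators "A" and "B", two contiguous
+# 10-byte blocks; read 10 bytes starting at offset 5):
+#
+#   blocks = [Range("A", 0, 10), Range("B", 10, 10)]
+#   locators_and_ranges(blocks, 5, 10)
+#   # -> [LocatorAndRange("A", 10, 5, 5), LocatorAndRange("B", 10, 0, 5)]
+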
+def replace_range(data_locators, new_range_start, new_range_size, new_locator, new_segment_offset):
+    '''
+    Replace a file segment range with a new segment.
+
+    NOTE::
+      data_locators will be updated in place
+
+    :data_locators:
+      list of Range objects, assumes that segments are in order and contiguous
+
+    :new_range_start:
+      start of range to replace in data_locators
+
+    :new_range_size:
+      size of range to replace in data_locators
+
+    :new_locator:
+      locator for new segment to be inserted
+
+    :new_segment_offset:
+      segment offset within the locator
+
+    '''
+    if new_range_size == 0:
+        return
+
+    new_range_start = long(new_range_start)
+    new_range_size = long(new_range_size)
+    new_range_end = new_range_start + new_range_size
+
+    if len(data_locators) == 0:
+        data_locators.append(Range(new_locator, new_range_start, new_range_size, new_segment_offset))
+        return
+
+    last = data_locators[-1]
+    if (last.range_start+last.range_size) == new_range_start:
+        if last.locator == new_locator:
+            # extend last segment
+            last.range_size += new_range_size
+        else:
+            data_locators.append(Range(new_locator, new_range_start, new_range_size, new_segment_offset))
+        return
+
+    i = first_block(data_locators, new_range_start, new_range_size)
+    if i is None:
+        return
+
+    while i < len(data_locators):
+        dl = data_locators[i]
+        old_segment_start = dl.range_start
+        old_segment_end = old_segment_start + dl.range_size
+        _logger.debug("%s range_start %s segment_start %s range_end %s segment_end %s",
+                      dl, new_range_start, old_segment_start, new_range_end, old_segment_end)
+        if new_range_end <= old_segment_start:
+            # range ends before this segment starts, so don't look at any more locators
+            break
+
+        #if range_start >= old_segment_end:
+            # Range starts after this segment ends, so go to next segment.
+            # We should always start at the first segment due to the binary
+            # search above, so this test is unnecessary but useful to help
+            # document the algorithm.
+            #next
+
+        if old_segment_start <= new_range_start and new_range_end <= old_segment_end:
+            # new range starts and ends in old segment
+            # split segment into up to 3 pieces
+            if (new_range_start-old_segment_start) > 0:
+                data_locators[i] = Range(dl.locator, old_segment_start, (new_range_start-old_segment_start), dl.segment_offset)
+                data_locators.insert(i+1, Range(new_locator, new_range_start, new_range_size, new_segment_offset))
+            else:
+                data_locators[i] = Range(new_locator, new_range_start, new_range_size, new_segment_offset)
+                i -= 1
+            if (old_segment_end-new_range_end) > 0:
+                data_locators.insert(i+2, Range(dl.locator, new_range_end, (old_segment_end-new_range_end), dl.segment_offset + (new_range_start-old_segment_start) + new_range_size))
+            return
+        elif old_segment_start <= new_range_start and new_range_end > old_segment_end:
+            # range starts in this segment
+            # split segment into 2 pieces
+            data_locators[i] = Range(dl.locator, old_segment_start, (new_range_start-old_segment_start), dl.segment_offset)
+            data_locators.insert(i+1, Range(new_locator, new_range_start, new_range_size, new_segment_offset))
+            i += 1
+        elif new_range_start < old_segment_start and new_range_end >= old_segment_end:
+            # range starts in a previous segment and extends to further segments
+            # delete this segment
+            del data_locators[i]
+            i -= 1
+        elif new_range_start < old_segment_start and new_range_end < old_segment_end:
+            # range starts in a previous segment and ends in this segment
+            # move the starting point of this segment up, and shrink it.
+            data_locators[i] = Range(dl.locator, new_range_end, (old_segment_end-new_range_end), dl.segment_offset + (new_range_end-old_segment_start))
+            return
+        i += 1
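+
+# A worked sketch (hypothetical locators): overwriting 3 bytes at offset 5
+# of a single 10-byte segment splits it into three pieces.
+#
+#   segs = [Range("A", 0, 10)]
+#   replace_range(segs, 5, 3, "B", 0)
+#   # segs -> [Range("A", 0, 5, 0), Range("B", 5, 3, 0), Range("A", 8, 2, 8)]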
diff --git a/sdk/python/arvados/safeapi.py b/sdk/python/arvados/safeapi.py
new file mode 100644 (file)
index 0000000..9737da1
--- /dev/null
@@ -0,0 +1,32 @@
+import threading
+import api
+import keep
+import config
+import copy
+
+class ThreadSafeApiCache(object):
+    """Threadsafe wrapper for API objects.
+
+    This class stores and returns a different api object per thread, because
+    httplib2, which underlies apiclient, is not threadsafe.
+    """
+
+    def __init__(self, apiconfig=None, keep_params=None):
+        if apiconfig is None:
+            apiconfig = config.settings()
+        if keep_params is None:
+            # Avoid sharing a mutable default argument across instances.
+            keep_params = {}
+        self.apiconfig = copy.copy(apiconfig)
+        self.local = threading.local()
+        self.keep = keep.KeepClient(api_client=self, **keep_params)
+
+    def localapi(self):
+        if 'api' not in self.local.__dict__:
+            self.local.api = api.api('v1', False, apiconfig=self.apiconfig)
+        return self.local.api
+
+    def __getattr__(self, name):
+        # Proxy nonexistent attributes to the thread-local API client;
+        # getattr() raises AttributeError itself if the name is unknown.
+        if name == "api_token":
+            return self.apiconfig['ARVADOS_API_TOKEN']
+        return getattr(self.localapi(), name)
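+
+# A usage sketch (illustrative; assumes default Arvados config settings):
+#
+#   api = ThreadSafeApiCache()
+#   listing = api.collections().list(limit=5).execute()   # proxied to the
+#                                                         # thread-local client
+#   data = api.keep.get("d41d8cd98f00b204e9800998ecf8427e+0")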
index c263dd871be9d663d4aceebbfaf18f07aecd71dc..9cfceb8f4960f53948f0e24557ff7b1af3068b08 100644 (file)
-import bz2
 import collections
 import hashlib
 import os
 import re
-import zlib
+import threading
+import functools
+import copy
 
-from .arvfile import ArvadosFileBase
+from .ranges import *
+from .arvfile import StreamFileReader
 from arvados.retry import retry_method
 from keep import *
 import config
 import errors
 
-LOCATOR = 0
-BLOCKSIZE = 1
-OFFSET = 2
-SEGMENTSIZE = 3
+def locator_block_size(loc):
+    s = re.match(r'[0-9a-f]{32}\+(\d+)(\+\S+)*', loc)
+    return long(s.group(1))
 
-def locators_and_ranges(data_locators, range_start, range_size, debug=False):
+def normalize_stream(s, stream):
     '''
-    Get blocks that are covered by the range
-    data_locators: list of [locator, block_size, block_start], assumes that blocks are in order and contigous
-    range_start: start of range
-    range_size: size of range
-    returns list of [block locator, blocksize, segment offset, segment size] that satisfies the range
+    s is the stream name
+    stream is a dict mapping each filename to a list of LocatorAndRange objects,
+    each giving a block locator, block size, segment offset (from the beginning
+    of the block), and segment size
+    returns the stream rendered as a list of manifest tokens
     '''
-    if range_size == 0:
-        return []
-    resp = []
-    range_start = long(range_start)
-    range_size = long(range_size)
-    range_end = range_start + range_size
-    block_start = 0L
-
-    # range_start/block_start is the inclusive lower bound
-    # range_end/block_end is the exclusive upper bound
-
-    hi = len(data_locators)
-    lo = 0
-    i = int((hi + lo) / 2)
-    block_size = data_locators[i][BLOCKSIZE]
-    block_start = data_locators[i][OFFSET]
-    block_end = block_start + block_size
-    if debug: print '---'
-
-    # perform a binary search for the first block
-    # assumes that all of the blocks are contigious, so range_start is guaranteed
-    # to either fall into the range of a block or be outside the block range entirely
-    while not (range_start >= block_start and range_start < block_end):
-        if lo == i:
-            # must be out of range, fail
-            return []
-        if range_start > block_start:
-            lo = i
-        else:
-            hi = i
-        i = int((hi + lo) / 2)
-        if debug: print lo, i, hi
-        block_size = data_locators[i][BLOCKSIZE]
-        block_start = data_locators[i][OFFSET]
-        block_end = block_start + block_size
-
-    while i < len(data_locators):
-        locator, block_size, block_start = data_locators[i]
-        block_end = block_start + block_size
-        if debug:
-            print locator, "range_start", range_start, "block_start", block_start, "range_end", range_end, "block_end", block_end
-        if range_end <= block_start:
-            # range ends before this block starts, so don't look at any more locators
-            break
-
-        #if range_start >= block_end:
-            # range starts after this block ends, so go to next block
-            # we should always start at the first block due to the binary above, so this test is redundant
-            #next
-
-        if range_start >= block_start and range_end <= block_end:
-            # range starts and ends in this block
-            resp.append([locator, block_size, range_start - block_start, range_size])
-        elif range_start >= block_start and range_end > block_end:
-            # range starts in this block
-            resp.append([locator, block_size, range_start - block_start, block_end - range_start])
-        elif range_start < block_start and range_end > block_end:
-            # range starts in a previous block and extends to further blocks
-            resp.append([locator, block_size, 0L, block_size])
-        elif range_start < block_start and range_end <= block_end:
-            # range starts in a previous block and ends in this block
-            resp.append([locator, block_size, 0L, range_end - block_start])
-        block_start = block_end
-        i += 1
-    return resp
-
-def split(path):
-    """split(path) -> streamname, filename
-
-    Separate the stream name and file name in a /-separated stream path.
-    If no stream name is available, assume '.'.
-    """
-    try:
-        stream_name, file_name = path.rsplit('/', 1)
-    except ValueError:  # No / in string
-        stream_name, file_name = '.', path
-    return stream_name, file_name
-
-class StreamFileReader(ArvadosFileBase):
-    class _NameAttribute(str):
-        # The Python file API provides a plain .name attribute.
-        # Older SDK provided a name() method.
-        # This class provides both, for maximum compatibility.
-        def __call__(self):
-            return self
-
-
-    def __init__(self, stream, segments, name):
-        super(StreamFileReader, self).__init__(self._NameAttribute(name), 'rb')
-        self._stream = stream
-        self.segments = segments
-        self._filepos = 0L
-        self.num_retries = stream.num_retries
-        self._readline_cache = (None, None)
-
-    def __iter__(self):
-        while True:
-            data = self.readline()
-            if not data:
-                break
-            yield data
-
-    def decompressed_name(self):
-        return re.sub('\.(bz2|gz)$', '', self.name)
-
-    def stream_name(self):
-        return self._stream.name()
-
-    @ArvadosFileBase._before_close
-    def seek(self, pos, whence=os.SEEK_CUR):
-        if whence == os.SEEK_CUR:
-            pos += self._filepos
-        elif whence == os.SEEK_END:
-            pos += self.size()
-        self._filepos = min(max(pos, 0L), self.size())
-
-    def tell(self):
-        return self._filepos
-
-    def size(self):
-        n = self.segments[-1]
-        return n[OFFSET] + n[BLOCKSIZE]
-
-    @ArvadosFileBase._before_close
-    @retry_method
-    def read(self, size, num_retries=None):
-        """Read up to 'size' bytes from the stream, starting at the current file position"""
-        if size == 0:
-            return ''
-
-        data = ''
-        available_chunks = locators_and_ranges(self.segments, self._filepos, size)
-        if available_chunks:
-            locator, blocksize, segmentoffset, segmentsize = available_chunks[0]
-            data = self._stream.readfrom(locator+segmentoffset, segmentsize,
-                                         num_retries=num_retries)
-
-        self._filepos += len(data)
-        return data
-
-    @ArvadosFileBase._before_close
-    @retry_method
-    def readfrom(self, start, size, num_retries=None):
-        """Read up to 'size' bytes from the stream, starting at 'start'"""
-        if size == 0:
-            return ''
-
-        data = []
-        for locator, blocksize, segmentoffset, segmentsize in locators_and_ranges(self.segments, start, size):
-            data.append(self._stream.readfrom(locator+segmentoffset, segmentsize,
-                                              num_retries=num_retries))
-        return ''.join(data)
-
-    @ArvadosFileBase._before_close
-    @retry_method
-    def readall(self, size=2**20, num_retries=None):
-        while True:
-            data = self.read(size, num_retries=num_retries)
-            if data == '':
-                break
-            yield data
-
-    @ArvadosFileBase._before_close
-    @retry_method
-    def readline(self, size=float('inf'), num_retries=None):
-        cache_pos, cache_data = self._readline_cache
-        if self.tell() == cache_pos:
-            data = [cache_data]
-        else:
-            data = ['']
-        data_size = len(data[-1])
-        while (data_size < size) and ('\n' not in data[-1]):
-            next_read = self.read(2 ** 20, num_retries=num_retries)
-            if not next_read:
-                break
-            data.append(next_read)
-            data_size += len(next_read)
-        data = ''.join(data)
-        try:
-            nextline_index = data.index('\n') + 1
-        except ValueError:
-            nextline_index = len(data)
-        nextline_index = min(nextline_index, size)
-        self._readline_cache = (self.tell(), data[nextline_index:])
-        return data[:nextline_index]
-
-    @ArvadosFileBase._before_close
-    @retry_method
-    def decompress(self, decompress, size, num_retries=None):
-        for segment in self.readall(size, num_retries):
-            data = decompress(segment)
-            if data:
-                yield data
+    stream_tokens = [s]
+    sortedfiles = list(stream.keys())
+    sortedfiles.sort()
+
+    blocks = {}
+    streamoffset = 0L
+    # Go through each file and add each referenced block exactly once.
+    for f in sortedfiles:
+        for b in stream[f]:
+            if b.locator not in blocks:
+                stream_tokens.append(b.locator)
+                blocks[b.locator] = streamoffset
+                streamoffset += locator_block_size(b.locator)
+
+    # Add the empty block if the stream is otherwise empty.
+    if len(stream_tokens) == 1:
+        stream_tokens.append(config.EMPTY_BLOCK_LOCATOR)
+
+    for f in sortedfiles:
+        # Add in file segments
+        current_span = None
+        fout = f.replace(' ', '\\040')
+        for segment in stream[f]:
+            # Collapse adjacent segments
+            streamoffset = blocks[segment.locator] + segment.segment_offset
+            if current_span is None:
+                current_span = [streamoffset, streamoffset + segment.segment_size]
+            else:
+                if streamoffset == current_span[1]:
+                    current_span[1] += segment.segment_size
+                else:
+                    stream_tokens.append("{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
+                    current_span = [streamoffset, streamoffset + segment.segment_size]
 
-    @ArvadosFileBase._before_close
-    @retry_method
-    def readall_decompressed(self, size=2**20, num_retries=None):
-        self.seek(0)
-        if self.name.endswith('.bz2'):
-            dc = bz2.BZ2Decompressor()
-            return self.decompress(dc.decompress, size,
-                                   num_retries=num_retries)
-        elif self.name.endswith('.gz'):
-            dc = zlib.decompressobj(16+zlib.MAX_WBITS)
-            return self.decompress(lambda segment: dc.decompress(dc.unconsumed_tail + segment),
-                                   size, num_retries=num_retries)
-        else:
-            return self.readall(size, num_retries=num_retries)
+        if current_span is not None:
+            stream_tokens.append("{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
 
-    @ArvadosFileBase._before_close
-    @retry_method
-    def readlines(self, sizehint=float('inf'), num_retries=None):
-        data = []
-        data_size = 0
-        for s in self.readall(num_retries=num_retries):
-            data.append(s)
-            data_size += len(s)
-            if data_size >= sizehint:
-                break
-        return ''.join(data).splitlines(True)
+        if not stream[f]:
+            stream_tokens.append("0:0:{0}".format(fout))
 
-    def as_manifest(self):
-        manifest_text = ['.']
-        manifest_text.extend([d[LOCATOR] for d in self._stream._data_locators])
-        manifest_text.extend(["{}:{}:{}".format(seg[LOCATOR], seg[BLOCKSIZE], self.name().replace(' ', '\\040')) for seg in self.segments])
-        return arvados.CollectionReader(' '.join(manifest_text) + '\n').manifest_text(normalize=True)
+    return stream_tokens
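
The adjacent-segment collapse above is the heart of manifest normalization: file segments that land back-to-back in the normalized stream are merged into a single offset:size token. A minimal sketch of just that collapse, assuming (offset, size) pairs:

    # Merge stream-adjacent file segments into single spans, as the
    # current_span loop above does.
    def collapse_segments(segments):
        spans = []
        for offset, size in segments:
            if spans and offset == spans[-1][1]:
                spans[-1][1] += size  # contiguous: extend the open span
            else:
                spans.append([offset, offset + size])
        return ["{}:{}".format(start, end - start) for start, end in spans]

    # collapse_segments([(0, 5), (5, 5), (20, 3)]) -> ['0:10', '20:3']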
 
 
 class StreamReader(object):
@@ -273,7 +87,7 @@ class StreamReader(object):
             s = re.match(r'^[0-9a-f]{32}\+(\d+)(\+\S+)*$', tok)
             if s:
                 blocksize = long(s.group(1))
-                self._data_locators.append([tok, blocksize, streamoffset])
+                self._data_locators.append(Range(tok, streamoffset, blocksize))
                 streamoffset += blocksize
                 continue
 
@@ -283,10 +97,10 @@ class StreamReader(object):
                 size = long(s.group(2))
                 name = s.group(3).replace('\\040', ' ')
                 if name not in self._files:
-                    self._files[name] = StreamFileReader(self, [[pos, size, 0]], name)
+                    self._files[name] = StreamFileReader(self, [Range(pos, 0, size)], name)
                 else:
-                    n = self._files[name]
-                    n.segments.append([pos, size, n.size()])
+                    filereader = self._files[name]
+                    filereader.segments.append(Range(pos, filereader.size(), size))
                 continue
 
             raise errors.SyntaxError("Invalid manifest format")
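
The parser above classifies each manifest token with two regexes: a 32-hex-digit md5 plus size (plus optional hints) extends the stream's block list, while an offset:size:name token adds a file segment. A small sketch of the same classification (the segment regex is an assumption; only its capture groups appear in this hunk):

    import re

    def classify_token(tok):
        if re.match(r'^[0-9a-f]{32}\+(\d+)(\+\S+)*$', tok):
            return 'block locator'
        if re.match(r'^(\d+):(\d+):(\S+)$', tok):  # assumed segment pattern
            return 'file segment'
        return 'invalid'

    # classify_token('781e5e245d69b566979b86e28d23f2c7+10') -> 'block locator'
    # classify_token('0:10:count.txt')                      -> 'file segment'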
@@ -300,34 +114,45 @@ class StreamReader(object):
     def all_files(self):
         return self._files.values()
 
-    def size(self):
+    def _size(self):
         n = self._data_locators[-1]
-        return n[OFFSET] + n[BLOCKSIZE]
+        return n.range_start + n.range_size
+
+    def size(self):
+        return self._size()
 
     def locators_and_ranges(self, range_start, range_size):
         return locators_and_ranges(self._data_locators, range_start, range_size)
 
+    @retry_method
+    def _keepget(self, locator, num_retries=None):
+        return self._keep.get(locator, num_retries=num_retries)
+
     @retry_method
     def readfrom(self, start, size, num_retries=None):
+        return self._readfrom(start, size, num_retries=num_retries)
+
+    @retry_method
+    def _readfrom(self, start, size, num_retries=None):
         """Read up to 'size' bytes from the stream, starting at 'start'"""
         if size == 0:
             return ''
         if self._keep is None:
             self._keep = KeepClient(num_retries=self.num_retries)
         data = []
-        for locator, blocksize, segmentoffset, segmentsize in locators_and_ranges(self._data_locators, start, size):
-            data.append(self._keep.get(locator, num_retries=num_retries)[segmentoffset:segmentoffset+segmentsize])
+        for lr in locators_and_ranges(self._data_locators, start, size):
+            data.append(self._keepget(lr.locator, num_retries=num_retries)[lr.segment_offset:lr.segment_offset+lr.segment_size])
         return ''.join(data)
 
     def manifest_text(self, strip=False):
         manifest_text = [self.name().replace(' ', '\\040')]
         if strip:
             for d in self._data_locators:
-                m = re.match(r'^[0-9a-f]{32}\+\d+', d[LOCATOR])
+                m = re.match(r'^[0-9a-f]{32}\+\d+', d.locator)
                 manifest_text.append(m.group(0))
         else:
-            manifest_text.extend([d[LOCATOR] for d in self._data_locators])
-        manifest_text.extend([' '.join(["{}:{}:{}".format(seg[LOCATOR], seg[BLOCKSIZE], f.name().replace(' ', '\\040'))
+            manifest_text.extend([d.locator for d in self._data_locators])
+        manifest_text.extend([' '.join(["{}:{}:{}".format(seg.locator, seg.range_size, f.name.replace(' ', '\\040'))
                                         for seg in f.segments])
                               for f in self._files.values()])
         return ' '.join(manifest_text) + '\n'
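
With strip=True, the regex above keeps only the md5+size prefix of each locator, dropping any trailing hints such as +A<signature>@<expiry> permission tokens:

    import re

    # Hypothetical signed locator; the hint values are made up.
    loc = '781e5e245d69b566979b86e28d23f2c7+10+A1234567890abcdef@53bed294'
    print(re.match(r'^[0-9a-f]{32}\+\d+', loc).group(0))
    # 781e5e245d69b566979b86e28d23f2c7+10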
index 378e93f3861633f82af2b253515877833c21052d..49ed79cc4570f4e0915392ba9774df26e39cf5e5 100644 (file)
@@ -53,17 +53,34 @@ def fake_requests_response(code, body, **headers):
     r.raw = io.BytesIO(body)
     return r
 
-def mock_get_responses(body, *codes, **headers):
-    return mock.patch('requests.get', side_effect=queue_with((
-        fake_requests_response(code, body, **headers) for code in codes)))
+# The following helpers patch requests.Session(); the patched constructor's
+# return_value is a mock Session object.  The put/get attributes are set on
+# that mock Session, and the desired put/get behavior is set on those mocks.
 
 def mock_put_responses(body, *codes, **headers):
-    return mock.patch('requests.put', side_effect=queue_with((
-        fake_requests_response(code, body, **headers) for code in codes)))
+    m = mock.MagicMock()
+    if isinstance(body, tuple):
+        codes = list(codes)
+        codes.insert(0, body)
+        m.return_value.put.side_effect = queue_with((fake_requests_response(code, b, **headers) for b, code in codes))
+    else:
+        m.return_value.put.side_effect = queue_with((fake_requests_response(code, body, **headers) for code in codes))
+    return mock.patch('requests.Session', m)
+
+def mock_get_responses(body, *codes, **headers):
+    m = mock.MagicMock()
+    m.return_value.get.side_effect = queue_with((fake_requests_response(code, body, **headers) for code in codes))
+    return mock.patch('requests.Session', m)
 
-def mock_requestslib_responses(method, body, *codes, **headers):
-    return mock.patch(method, side_effect=queue_with((
-        fake_requests_response(code, body, **headers) for code in codes)))
+def mock_get(side_effect):
+    m = mock.MagicMock()
+    m.return_value.get.side_effect = side_effect
+    return mock.patch('requests.Session', m)
+
+def mock_put(side_effect):
+    m = mock.MagicMock()
+    m.return_value.put.side_effect = side_effect
+    return mock.patch('requests.Session', m)
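
A hypothetical test body using these helpers, to show where the queued responses surface (this assumes fake_requests_response sets status_code from its first argument):

    with mock_get_responses('foo', 200, 500):
        import requests
        session = requests.Session()            # the mocked constructor's return_value
        first = session.get('http://example')   # first queued fake response (200)
        second = session.get('http://example')  # second queued fake response (500)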
 
 class MockStreamReader(object):
     def __init__(self, name='.', *data):
@@ -77,8 +94,10 @@ class MockStreamReader(object):
         return self._name
 
     def readfrom(self, start, size, num_retries=None):
-        return self._data[start:start + size]
+        return self._readfrom(start, size, num_retries=num_retries)
 
+    def _readfrom(self, start, size, num_retries=None):
+        return self._data[start:start + size]
 
 class ApiClientMock(object):
     def api_client_mock(self):
diff --git a/sdk/python/tests/test_arvfile.py b/sdk/python/tests/test_arvfile.py
new file mode 100644 (file)
index 0000000..aaf79a2
--- /dev/null
@@ -0,0 +1,551 @@
+#!/usr/bin/env python
+
+import bz2
+import gzip
+import io
+import mock
+import os
+import unittest
+import hashlib
+
+import arvados
+from arvados import Range, KeepLocator
+from arvados.collection import import_manifest, export_manifest, ReadOnlyCollection, WritableCollection
+from arvados.arvfile import ArvadosFile, ArvadosFileReader, SYNC_READONLY, SYNC_EXPLICIT
+
+import arvados_testutil as tutil
+from test_stream import StreamFileReaderTestCase, StreamRetryTestMixin
+
+class ArvadosFileWriterTestCase(unittest.TestCase):
+    class MockKeep(object):
+        def __init__(self, blocks):
+            self.blocks = blocks
+            self.requests = []
+        def get(self, locator, num_retries=0):
+            self.requests.append(locator)
+            return self.blocks.get(locator)
+        def get_from_cache(self, locator):
+            self.requests.append(locator)
+            return self.blocks.get(locator)
+        def put(self, data, num_retries=None):
+            pdh = "%s+%i" % (hashlib.md5(data).hexdigest(), len(data))
+            self.blocks[pdh] = str(data)
+            return pdh
+
+    class MockApi(object):
+        def __init__(self, b, r):
+            self.b = b
+            self.r = r
+        class MockCollections(object):
+            def __init__(self, b, r):
+                self.b = b
+                self.r = r
+            class Execute(object):
+                def __init__(self, r):
+                    self.r = r
+                def execute(self, num_retries=None):
+                    return self.r
+            def create(self, ensure_unique_name=False, body=None):
+                if body != self.b:
+                    raise Exception("Body %s does not match expectation %s" % (body, self.b))
+                return ArvadosFileWriterTestCase.MockApi.MockCollections.Execute(self.r)
+            def update(self, uuid=None, body=None):
+                return ArvadosFileWriterTestCase.MockApi.MockCollections.Execute(self.r)
+        def collections(self):
+            return ArvadosFileWriterTestCase.MockApi.MockCollections(self.b, self.r)
+
+
+    def test_truncate(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+        api = ArvadosFileWriterTestCase.MockApi({"name":"test_truncate",
+                                                 "manifest_text":". 781e5e245d69b566979b86e28d23f2c7+10 0:8:count.txt\n"},
+                                                {"uuid":"zzzzz-4zz18-mockcollection0"})
+        with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+                             api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "r+")
+            self.assertEqual(writer.size(), 10)
+            writer.seek(5)
+            self.assertEqual("56789", writer.read(8))
+            writer.truncate(8)
+            writer.seek(5, os.SEEK_SET)
+            self.assertEqual("567", writer.read(8))
+
+            self.assertEqual(None, c._manifest_locator)
+            self.assertEqual(True, c.modified())
+            c.save_new("test_truncate")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c._manifest_locator)
+            self.assertEqual(False, c.modified())
+
+    def test_append0(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+        api = ArvadosFileWriterTestCase.MockApi({"name":"test_append",
+                                                 "manifest_text": ". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:13:count.txt\n"},
+                                                {"uuid":"zzzzz-4zz18-mockcollection0"})
+        with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+                             api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "r+")
+            self.assertEqual(writer.size(), 10)
+
+            writer.seek(5, os.SEEK_SET)
+            self.assertEqual("56789", writer.read(8))
+
+            writer.seek(10, os.SEEK_SET)
+            writer.write("foo")
+            self.assertEqual(writer.size(), 13)
+
+            writer.seek(5, os.SEEK_SET)
+            self.assertEqual("56789foo", writer.read(8))
+
+            self.assertEqual(None, c._manifest_locator)
+            self.assertEqual(True, c.modified())
+            self.assertEqual(None, keep.get("acbd18db4cc2f85cedef654fccc4a4d8+3"))
+
+            c.save_new("test_append")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c._manifest_locator)
+            self.assertEqual(False, c.modified())
+            self.assertEqual("foo", keep.get("acbd18db4cc2f85cedef654fccc4a4d8+3"))
+
+
+    def test_append1(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+        c = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n', keep_client=keep)
+        writer = c.open("count.txt", "a+")
+        self.assertEqual(writer.read(20), "0123456789")
+        writer.seek(0, os.SEEK_SET)
+
+        writer.write("hello")
+        self.assertEqual(writer.read(20), "0123456789hello")
+        writer.seek(0, os.SEEK_SET)
+
+        writer.write("world")
+        self.assertEqual(writer.read(20), "0123456789helloworld")
+
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 fc5e038d38a57032085441e7fe7010b0+10 0:20:count.txt\n", export_manifest(c))
+
+    def test_write0(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+        with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "r+")
+            self.assertEqual("0123456789", writer.readfrom(0, 13))
+            writer.seek(0, os.SEEK_SET)
+            writer.write("foo")
+            self.assertEqual(writer.size(), 10)
+            self.assertEqual("foo3456789", writer.readfrom(0, 13))
+            self.assertEqual(". acbd18db4cc2f85cedef654fccc4a4d8+3 781e5e245d69b566979b86e28d23f2c7+10 0:3:count.txt 6:7:count.txt\n", export_manifest(c))
+
+    def test_write1(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+        with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "r+")
+            self.assertEqual("0123456789", writer.readfrom(0, 13))
+            writer.seek(3, os.SEEK_SET)
+            writer.write("foo")
+            self.assertEqual(writer.size(), 10)
+            self.assertEqual("012foo6789", writer.readfrom(0, 13))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:count.txt 10:3:count.txt 6:4:count.txt\n", export_manifest(c))
+
+    def test_write2(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+        with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "r+")
+            self.assertEqual("0123456789", writer.readfrom(0, 13))
+            writer.seek(7, os.SEEK_SET)
+            writer.write("foo")
+            self.assertEqual(writer.size(), 10)
+            self.assertEqual("0123456foo", writer.readfrom(0, 13))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:7:count.txt 10:3:count.txt\n", export_manifest(c))
+
+    def test_write3(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+        with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt 0:10:count.txt\n',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "r+")
+            self.assertEqual("012345678901234", writer.readfrom(0, 15))
+            writer.seek(7, os.SEEK_SET)
+            writer.write("foobar")
+            self.assertEqual(writer.size(), 20)
+            self.assertEqual("0123456foobar34", writer.readfrom(0, 15))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 3858f62230ac3c915f300c664312c63f+6 0:7:count.txt 10:6:count.txt 3:7:count.txt\n", export_manifest(c))
+
+    def test_write4(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+        with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:4:count.txt 0:4:count.txt 0:4:count.txt',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "r+")
+            self.assertEqual("012301230123", writer.readfrom(0, 15))
+            writer.seek(2, os.SEEK_SET)
+            writer.write("abcdefg")
+            self.assertEqual(writer.size(), 12)
+            self.assertEqual("01abcdefg123", writer.readfrom(0, 15))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 7ac66c0f148de9519b8bd264312c4d64+7 0:2:count.txt 10:7:count.txt 1:3:count.txt\n", export_manifest(c))
+
+    def test_write_large(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        api = ArvadosFileWriterTestCase.MockApi({"name":"test_write_large",
+                                                 "manifest_text": ". a5de24f4417cfba9d5825eadc2f4ca49+67108000 598cc1a4ccaef8ab6e4724d87e675d78+32892000 0:100000000:count.txt\n"},
+                                                {"uuid":"zzzzz-4zz18-mockcollection0"})
+        with WritableCollection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',
+                             api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "r+")
+            text = ''.join(["0123456789" for a in xrange(0, 100)])
+            for b in xrange(0, 100000):
+                writer.write(text)
+            self.assertEqual(writer.size(), 100000000)
+
+            self.assertEqual(None, c._manifest_locator)
+            self.assertEqual(True, c.modified())
+            c.save_new("test_write_large")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c._manifest_locator)
+            self.assertEqual(False, c.modified())
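+
+The two block sizes in the expected manifest are not arbitrary: assuming the writer flushes a buffer block when the next write would push it past the 64 MiB Keep block limit, 1000-byte writes fill at most:
+
+    KEEP_BLOCK_SIZE = 2 ** 26                            # 67108864
+    WRITE_SIZE = 1000
+    print((KEEP_BLOCK_SIZE // WRITE_SIZE) * WRITE_SIZE)  # 67108000
+    print(100000000 - 67108000)                          # 32892000, the second block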
+
+    def test_write_rewrite0(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        with WritableCollection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "r+")
+            for b in xrange(0, 10):
+                writer.seek(0, os.SEEK_SET)
+                writer.write("0123456789")
+            writer.arvadosfile._repack_writes()
+            self.assertEqual(writer.size(), 10)
+            self.assertEqual("0123456789", writer.readfrom(0, 20))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n", export_manifest(c))
+
+    def test_write_rewrite1(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+        with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "r+")
+            for b in xrange(0, 10):
+                writer.seek(10, os.SEEK_SET)
+                writer.write("abcdefghij")
+            writer.arvadosfile._repack_writes()
+            self.assertEqual(writer.size(), 20)
+            self.assertEqual("0123456789abcdefghij", writer.readfrom(0, 20))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 a925576942e94b2ef57a066101b48876+10 0:20:count.txt\n", export_manifest(c))
+
+    def test_write_rewrite2(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+        with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt',
+                             keep_client=keep) as c:
+            writer = c.open("count.txt", "r+")
+            for b in xrange(0, 10):
+                writer.seek(5, os.SEEK_SET)
+                writer.write("abcdefghij")
+            writer.arvadosfile._repack_writes()
+            self.assertEqual(writer.size(), 15)
+            self.assertEqual("01234abcdefghij", writer.readfrom(0, 20))
+            self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 a925576942e94b2ef57a066101b48876+10 0:5:count.txt 10:10:count.txt\n", export_manifest(c))
+
+    def test_write_large_rewrite0(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        api = ArvadosFileWriterTestCase.MockApi({"name":"test_write_large",
+                                                 "manifest_text": ". 37400a68af9abdd76ca5bf13e819e42a+32892003 a5de24f4417cfba9d5825eadc2f4ca49+67108000 32892000:3:count.txt 32892006:67107997:count.txt 0:32892000:count.txt\n"},
+                                                {"uuid":"zzzzz-4zz18-mockcollection0"})
+        with WritableCollection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',
+                             api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "r+")
+            text = ''.join(["0123456789" for a in xrange(0, 100)])
+            for b in xrange(0, 100000):
+                writer.write(text)
+            writer.seek(0, os.SEEK_SET)
+            writer.write("foo")
+            self.assertEqual(writer.size(), 100000000)
+
+            self.assertEqual(None, c._manifest_locator)
+            self.assertEqual(True, c.modified())
+            c.save_new("test_write_large")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c._manifest_locator)
+            self.assertEqual(False, c.modified())
+
+    def test_create(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        api = ArvadosFileWriterTestCase.MockApi({"name":"test_create",
+                                                 "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n"},
+                                                {"uuid":"zzzzz-4zz18-mockcollection0"})
+        with WritableCollection(api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "w+")
+            self.assertEqual(writer.size(), 0)
+            writer.write("01234567")
+            self.assertEqual(writer.size(), 8)
+
+            self.assertEqual(None, c._manifest_locator)
+            self.assertEqual(True, c.modified())
+            self.assertEqual(None, keep.get("2e9ec317e197819358fbc43afca7d837+8"))
+            c.save_new("test_create")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c._manifest_locator)
+            self.assertEqual(False, c.modified())
+            self.assertEqual("01234567", keep.get("2e9ec317e197819358fbc43afca7d837+8"))
+
+
+    def test_create_subdir(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        api = ArvadosFileWriterTestCase.MockApi({"name":"test_create",
+                                                 "manifest_text":"./foo/bar 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n"},
+                                                {"uuid":"zzzzz-4zz18-mockcollection0"})
+        with WritableCollection(api_client=api, keep_client=keep) as c:
+            writer = c.open("foo/bar/count.txt", "w+")
+            writer.write("01234567")
+            c.save_new("test_create")
+
+    def test_overwrite(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+        api = ArvadosFileWriterTestCase.MockApi({"name":"test_overwrite",
+                                                 "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n"},
+                                                {"uuid":"zzzzz-4zz18-mockcollection0"})
+        with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+                             api_client=api, keep_client=keep) as c:
+            writer = c.open("count.txt", "w+")
+            self.assertEqual(writer.size(), 0)
+            writer.write("01234567")
+            self.assertEqual(writer.size(), 8)
+
+            self.assertEqual(None, c._manifest_locator)
+            self.assertEqual(True, c.modified())
+            c.save_new("test_overwrite")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c._manifest_locator)
+            self.assertEqual(False, c.modified())
+
+    def test_file_not_found(self):
+        with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n') as c:
+            with self.assertRaises(IOError):
+                writer = c.open("nocount.txt", "r")
+
+    def test_cannot_open_directory(self):
+        with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n') as c:
+            with self.assertRaises(IOError):
+                writer = c.open(".", "r")
+
+    def test_create_multiple(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        api = ArvadosFileWriterTestCase.MockApi({"name":"test_create_multiple",
+                                                 "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:8:count1.txt 8:8:count2.txt\n"},
+                                                {"uuid":"zzzzz-4zz18-mockcollection0"})
+        with WritableCollection(api_client=api, keep_client=keep) as c:
+            w1 = c.open("count1.txt", "w")
+            w2 = c.open("count2.txt", "w")
+            w1.write("01234567")
+            w2.write("abcdefgh")
+            self.assertEqual(w1.size(), 8)
+            self.assertEqual(w2.size(), 8)
+
+            self.assertEqual(None, c._manifest_locator)
+            self.assertEqual(True, c.modified())
+            self.assertEqual(None, keep.get("2e9ec317e197819358fbc43afca7d837+8"))
+            c.save_new("test_create_multiple")
+            self.assertEqual("zzzzz-4zz18-mockcollection0", c._manifest_locator)
+            self.assertEqual(False, c.modified())
+            self.assertEqual("01234567", keep.get("2e9ec317e197819358fbc43afca7d837+8"))
+
+
+class ArvadosFileReaderTestCase(StreamFileReaderTestCase):
+    class MockParent(object):
+        class MockBlockMgr(object):
+            def __init__(self, blocks, nocache):
+                self.blocks = blocks
+                self.nocache = nocache
+
+            def block_prefetch(self, loc):
+                pass
+
+            def get_block_contents(self, loc, num_retries=0, cache_only=False):
+                if self.nocache and cache_only:
+                    return None
+                return self.blocks[loc]
+
+        def __init__(self, blocks, nocache):
+            self.blocks = blocks
+            self.nocache = nocache
+            self.lock = arvados.arvfile.NoopLock()
+
+        def root_collection(self):
+            return self
+
+        def _my_block_manager(self):
+            return ArvadosFileReaderTestCase.MockParent.MockBlockMgr(self.blocks, self.nocache)
+
+        def sync_mode(self):
+            return SYNC_READONLY
+
+
+    def make_count_reader(self, nocache=False):
+        stream = []
+        n = 0
+        blocks = {}
+        for d in ['01234', '34567', '67890']:
+            loc = '{}+{}'.format(hashlib.md5(d).hexdigest(), len(d))
+            blocks[loc] = d
+            stream.append(Range(loc, n, len(d)))
+            n += len(d)
+        af = ArvadosFile(ArvadosFileReaderTestCase.MockParent(blocks, nocache), stream=stream, segments=[Range(1, 0, 3), Range(6, 3, 3), Range(11, 6, 3)])
+        return ArvadosFileReader(af, "count.txt")
+
+    def test_read_returns_first_block(self):
+        # read() calls will be aligned on block boundaries - see #3663.
+        sfile = self.make_count_reader(nocache=True)
+        self.assertEqual('123', sfile.read(10))
+
+    def test_successive_reads(self):
+        sfile = self.make_count_reader(nocache=True)
+        for expect in ['123', '456', '789', '']:
+            self.assertEqual(expect, sfile.read(10))
+
+    def test_tell_after_block_read(self):
+        sfile = self.make_count_reader(nocache=True)
+        sfile.read(5)
+        self.assertEqual(3, sfile.tell())
+
+    def test_prefetch(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"2e9ec317e197819358fbc43afca7d837+8": "01234567", "e8dc4081b13434b45189a720b77b6818+8": "abcdefgh"})
+        with WritableCollection(". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:16:count.txt\n", keep_client=keep) as c:
+            r = c.open("count.txt", "r")
+            self.assertEqual("0123", r.read(4))
+        self.assertIn("2e9ec317e197819358fbc43afca7d837+8", keep.requests)
+        self.assertIn("e8dc4081b13434b45189a720b77b6818+8", keep.requests)
+
+    def test__eq__1(self):
+        with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c1:
+            with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c2:
+                self.assertTrue(c1["count1.txt"] == c2["count1.txt"])
+                self.assertFalse(c1["count1.txt"] != c2["count1.txt"])
+
+    def test__eq__2(self):
+        with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c1:
+            with WritableCollection() as c2:
+                with c2.open("count1.txt", "w") as f:
+                    f.write("0123456789")
+
+                self.assertTrue(c1["count1.txt"] == c2["count1.txt"])
+                self.assertFalse(c1["count1.txt"] != c2["count1.txt"])
+
+    def test__ne__(self):
+        with WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c1:
+            with WritableCollection() as c2:
+                with c2.open("count1.txt", "w") as f:
+                    f.write("1234567890")
+
+                self.assertTrue(c1["count1.txt"] != c2["count1.txt"])
+                self.assertFalse(c1["count1.txt"] == c2["count1.txt"])
+
+
+class ArvadosFileReadTestCase(unittest.TestCase, StreamRetryTestMixin):
+    def reader_for(self, coll_name, **kwargs):
+        stream = []
+        segments = []
+        n = 0
+        for d in self.manifest_for(coll_name).split():
+            try:
+                k = KeepLocator(d)
+                segments.append(Range(n, n, k.size))
+                stream.append(Range(d, n, k.size))
+                n += k.size
+            except ValueError:
+                pass
+        col = ReadOnlyCollection(keep_client=self.keep_client())
+        col._my_block_manager().prefetch_enabled = False
+        af = ArvadosFile(col,
+                         stream=stream,
+                         segments=segments)
+        return ArvadosFileReader(af, "test", **kwargs)
+
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return reader.read(byte_count, **kwargs)
+
+
+class ArvadosFileReadFromTestCase(ArvadosFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return reader.readfrom(0, byte_count, **kwargs)
+
+
+class ArvadosFileReadAllTestCase(ArvadosFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return ''.join(reader.readall(**kwargs))
+
+
+class ArvadosFileReadAllDecompressedTestCase(ArvadosFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return ''.join(reader.readall_decompressed(**kwargs))
+
+
+class ArvadosFileReadlinesTestCase(ArvadosFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return ''.join(reader.readlines(**kwargs))
+
+class BlockManagerTest(unittest.TestCase):
+    def test_bufferblock_append(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        blockmanager = arvados.arvfile.BlockManager(keep)
+        bufferblock = blockmanager.alloc_bufferblock()
+        bufferblock.append("foo")
+
+        self.assertEqual(bufferblock.size(), 3)
+        self.assertEqual(bufferblock.buffer_view[0:3], "foo")
+        self.assertEqual(bufferblock.locator(), "acbd18db4cc2f85cedef654fccc4a4d8+3")
+
+        bufferblock.append("bar")
+
+        self.assertEqual(bufferblock.size(), 6)
+        self.assertEqual(bufferblock.buffer_view[0:6], "foobar")
+        self.assertEqual(bufferblock.locator(), "3858f62230ac3c915f300c664312c63f+6")
+
+        bufferblock.set_state(arvados.arvfile.BufferBlock.PENDING)
+        with self.assertRaises(arvados.errors.AssertionError):
+            bufferblock.append("bar")
+
+    def test_bufferblock_dup(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({})
+        blockmanager = arvados.arvfile.BlockManager(keep)
+        bufferblock = blockmanager.alloc_bufferblock()
+        bufferblock.append("foo")
+
+        self.assertEqual(bufferblock.size(), 3)
+        self.assertEqual(bufferblock.buffer_view[0:3], "foo")
+        self.assertEqual(bufferblock.locator(), "acbd18db4cc2f85cedef654fccc4a4d8+3")
+        bufferblock.set_state(arvados.arvfile.BufferBlock.PENDING)
+
+        bufferblock2 = blockmanager.dup_block(bufferblock, None)
+        self.assertNotEqual(bufferblock.blockid, bufferblock2.blockid)
+
+        bufferblock2.append("bar")
+
+        self.assertEqual(bufferblock2.size(), 6)
+        self.assertEqual(bufferblock2.buffer_view[0:6], "foobar")
+        self.assertEqual(bufferblock2.locator(), "3858f62230ac3c915f300c664312c63f+6")
+
+        self.assertEqual(bufferblock.size(), 3)
+        self.assertEqual(bufferblock.buffer_view[0:3], "foo")
+        self.assertEqual(bufferblock.locator(), "acbd18db4cc2f85cedef654fccc4a4d8+3")
+
+    def test_bufferblock_get(self):
+        keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+        blockmanager = arvados.arvfile.BlockManager(keep)
+        bufferblock = blockmanager.alloc_bufferblock()
+        bufferblock.append("foo")
+
+        self.assertEqual(blockmanager.get_block_contents("781e5e245d69b566979b86e28d23f2c7+10", 1), "0123456789")
+        self.assertEqual(blockmanager.get_block_contents(bufferblock.blockid, 1), "foo")
+
+    def test_bufferblock_commit(self):
+        mockkeep = mock.MagicMock()
+        blockmanager = arvados.arvfile.BlockManager(mockkeep)
+        bufferblock = blockmanager.alloc_bufferblock()
+        bufferblock.append("foo")
+        blockmanager.commit_all()
+        self.assertTrue(mockkeep.put.called)
+        self.assertEqual(bufferblock.state(), arvados.arvfile.BufferBlock.COMMITTED)
+        self.assertIsNone(bufferblock.buffer_view)
+
+
+    def test_bufferblock_commit_with_error(self):
+        mockkeep = mock.MagicMock()
+        mockkeep.put.side_effect = arvados.errors.KeepWriteError("fail")
+        blockmanager = arvados.arvfile.BlockManager(mockkeep)
+        bufferblock = blockmanager.alloc_bufferblock()
+        bufferblock.append("foo")
+        with self.assertRaises(arvados.errors.KeepWriteError) as err:
+            blockmanager.commit_all()
+        self.assertEqual(str(err.exception), "Error writing some blocks: acbd18db4cc2f85cedef654fccc4a4d8+3 raised KeepWriteError (fail)")
+        self.assertEqual(bufferblock.state(), arvados.arvfile.BufferBlock.PENDING)
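+
+Taken together, these tests imply a small state machine for BufferBlock (the initial writable state's name does not appear in this diff and is left unnamed here):
+
+    # (writable) --set_state(PENDING)--> PENDING --put succeeds--> COMMITTED
+    #     |                                 |                      (buffer_view
+    #  append() ok                    append() raises               released)
+    #                                       +--put fails--> stays PENDING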
index dbbe3f5e73deca582b65f42900f8181e52a63a02..a0cdb0dc2c9176a5f1010d5388b3e0df0b10f36f 100644 (file)
@@ -14,6 +14,9 @@ import unittest
 
 import run_test_server
 import arvados_testutil as tutil
+from arvados.ranges import Range, LocatorAndRange
+from arvados.collection import import_manifest, export_manifest, ReadOnlyCollection, WritableCollection
+from arvados.arvfile import SYNC_EXPLICIT
 
 class TestResumableWriter(arvados.ResumableCollectionWriter):
     KEEP_BLOCK_SIZE = 1024  # PUT to Keep every 1K.
@@ -210,97 +213,97 @@ class ArvadosCollectionsTest(run_test_server.TestCaseWithServers,
         self.assertEqual(arvados.CollectionReader(m8, self.api_client).manifest_text(normalize=True), m8)
 
     def test_locators_and_ranges(self):
-        blocks2 = [['a', 10, 0],
-                  ['b', 10, 10],
-                  ['c', 10, 20],
-                  ['d', 10, 30],
-                  ['e', 10, 40],
-                  ['f', 10, 50]]
-
-        self.assertEqual(arvados.locators_and_ranges(blocks2,  2,  2), [['a', 10, 2, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks2, 12, 2), [['b', 10, 2, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks2, 22, 2), [['c', 10, 2, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks2, 32, 2), [['d', 10, 2, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks2, 42, 2), [['e', 10, 2, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks2, 52, 2), [['f', 10, 2, 2]])
+        blocks2 = [Range('a', 0, 10),
+                   Range('b', 10, 10),
+                   Range('c', 20, 10),
+                   Range('d', 30, 10),
+                   Range('e', 40, 10),
+                   Range('f', 50, 10)]
+
+        self.assertEqual(arvados.locators_and_ranges(blocks2,  2,  2), [LocatorAndRange('a', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 12, 2), [LocatorAndRange('b', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 22, 2), [LocatorAndRange('c', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 32, 2), [LocatorAndRange('d', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 42, 2), [LocatorAndRange('e', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 52, 2), [LocatorAndRange('f', 10, 2, 2)])
         self.assertEqual(arvados.locators_and_ranges(blocks2, 62, 2), [])
         self.assertEqual(arvados.locators_and_ranges(blocks2, -2, 2), [])
 
-        self.assertEqual(arvados.locators_and_ranges(blocks2,  0,  2), [['a', 10, 0, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks2, 10, 2), [['b', 10, 0, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks2, 20, 2), [['c', 10, 0, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks2, 30, 2), [['d', 10, 0, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks2, 40, 2), [['e', 10, 0, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks2, 50, 2), [['f', 10, 0, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2,  0,  2), [LocatorAndRange('a', 10, 0, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 10, 2), [LocatorAndRange('b', 10, 0, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 20, 2), [LocatorAndRange('c', 10, 0, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 30, 2), [LocatorAndRange('d', 10, 0, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 40, 2), [LocatorAndRange('e', 10, 0, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 50, 2), [LocatorAndRange('f', 10, 0, 2)])
         self.assertEqual(arvados.locators_and_ranges(blocks2, 60, 2), [])
         self.assertEqual(arvados.locators_and_ranges(blocks2, -2, 2), [])
 
-        self.assertEqual(arvados.locators_and_ranges(blocks2,  9,  2), [['a', 10, 9, 1], ['b', 10, 0, 1]])
-        self.assertEqual(arvados.locators_and_ranges(blocks2, 19, 2), [['b', 10, 9, 1], ['c', 10, 0, 1]])
-        self.assertEqual(arvados.locators_and_ranges(blocks2, 29, 2), [['c', 10, 9, 1], ['d', 10, 0, 1]])
-        self.assertEqual(arvados.locators_and_ranges(blocks2, 39, 2), [['d', 10, 9, 1], ['e', 10, 0, 1]])
-        self.assertEqual(arvados.locators_and_ranges(blocks2, 49, 2), [['e', 10, 9, 1], ['f', 10, 0, 1]])
-        self.assertEqual(arvados.locators_and_ranges(blocks2, 59, 2), [['f', 10, 9, 1]])
-
-
-        blocks3 = [['a', 10, 0],
-                  ['b', 10, 10],
-                  ['c', 10, 20],
-                  ['d', 10, 30],
-                  ['e', 10, 40],
-                  ['f', 10, 50],
-                  ['g', 10, 60]]
-
-        self.assertEqual(arvados.locators_and_ranges(blocks3,  2,  2), [['a', 10, 2, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks3, 12, 2), [['b', 10, 2, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks3, 22, 2), [['c', 10, 2, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks3, 32, 2), [['d', 10, 2, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks3, 42, 2), [['e', 10, 2, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks3, 52, 2), [['f', 10, 2, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks3, 62, 2), [['g', 10, 2, 2]])
-
-
-        blocks = [['a', 10, 0],
-                  ['b', 15, 10],
-                  ['c', 5, 25]]
+        self.assertEqual(arvados.locators_and_ranges(blocks2,  9,  2), [LocatorAndRange('a', 10, 9, 1), LocatorAndRange('b', 10, 0, 1)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 19, 2), [LocatorAndRange('b', 10, 9, 1), LocatorAndRange('c', 10, 0, 1)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 29, 2), [LocatorAndRange('c', 10, 9, 1), LocatorAndRange('d', 10, 0, 1)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 39, 2), [LocatorAndRange('d', 10, 9, 1), LocatorAndRange('e', 10, 0, 1)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 49, 2), [LocatorAndRange('e', 10, 9, 1), LocatorAndRange('f', 10, 0, 1)])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 59, 2), [LocatorAndRange('f', 10, 9, 1)])
+
+
+        blocks3 = [Range('a', 0, 10),
+                   Range('b', 10, 10),
+                   Range('c', 20, 10),
+                   Range('d', 30, 10),
+                   Range('e', 40, 10),
+                   Range('f', 50, 10),
+                   Range('g', 60, 10)]
+
+        self.assertEqual(arvados.locators_and_ranges(blocks3,  2,  2), [LocatorAndRange('a', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 12, 2), [LocatorAndRange('b', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 22, 2), [LocatorAndRange('c', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 32, 2), [LocatorAndRange('d', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 42, 2), [LocatorAndRange('e', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 52, 2), [LocatorAndRange('f', 10, 2, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 62, 2), [LocatorAndRange('g', 10, 2, 2)])
+
+
+        blocks = [Range('a', 0, 10),
+                  Range('b', 10, 15),
+                  Range('c', 25, 5)]
         self.assertEqual(arvados.locators_and_ranges(blocks, 1, 0), [])
-        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 5), [['a', 10, 0, 5]])
-        self.assertEqual(arvados.locators_and_ranges(blocks, 3, 5), [['a', 10, 3, 5]])
-        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 10), [['a', 10, 0, 10]])
-
-        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 11), [['a', 10, 0, 10],
-                                                                      ['b', 15, 0, 1]])
-        self.assertEqual(arvados.locators_and_ranges(blocks, 1, 11), [['a', 10, 1, 9],
-                                                                      ['b', 15, 0, 2]])
-        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 25), [['a', 10, 0, 10],
-                                                                      ['b', 15, 0, 15]])
-
-        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 30), [['a', 10, 0, 10],
-                                                                      ['b', 15, 0, 15],
-                                                                      ['c', 5, 0, 5]])
-        self.assertEqual(arvados.locators_and_ranges(blocks, 1, 30), [['a', 10, 1, 9],
-                                                                      ['b', 15, 0, 15],
-                                                                      ['c', 5, 0, 5]])
-        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 31), [['a', 10, 0, 10],
-                                                                      ['b', 15, 0, 15],
-                                                                      ['c', 5, 0, 5]])
-
-        self.assertEqual(arvados.locators_and_ranges(blocks, 15, 5), [['b', 15, 5, 5]])
-
-        self.assertEqual(arvados.locators_and_ranges(blocks, 8, 17), [['a', 10, 8, 2],
-                                                                      ['b', 15, 0, 15]])
-
-        self.assertEqual(arvados.locators_and_ranges(blocks, 8, 20), [['a', 10, 8, 2],
-                                                                      ['b', 15, 0, 15],
-                                                                      ['c', 5, 0, 3]])
-
-        self.assertEqual(arvados.locators_and_ranges(blocks, 26, 2), [['c', 5, 1, 2]])
-
-        self.assertEqual(arvados.locators_and_ranges(blocks, 9, 15), [['a', 10, 9, 1],
-                                                                      ['b', 15, 0, 14]])
-        self.assertEqual(arvados.locators_and_ranges(blocks, 10, 15), [['b', 15, 0, 15]])
-        self.assertEqual(arvados.locators_and_ranges(blocks, 11, 15), [['b', 15, 1, 14],
-                                                                       ['c', 5, 0, 1]])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 5), [LocatorAndRange('a', 10, 0, 5)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 3, 5), [LocatorAndRange('a', 10, 3, 5)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 10), [LocatorAndRange('a', 10, 0, 10)])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 11), [LocatorAndRange('a', 10, 0, 10),
+                                                                      LocatorAndRange('b', 15, 0, 1)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 1, 11), [LocatorAndRange('a', 10, 1, 9),
+                                                                      LocatorAndRange('b', 15, 0, 2)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 25), [LocatorAndRange('a', 10, 0, 10),
+                                                                      LocatorAndRange('b', 15, 0, 15)])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 30), [LocatorAndRange('a', 10, 0, 10),
+                                                                      LocatorAndRange('b', 15, 0, 15),
+                                                                      LocatorAndRange('c', 5, 0, 5)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 1, 30), [LocatorAndRange('a', 10, 1, 9),
+                                                                      LocatorAndRange('b', 15, 0, 15),
+                                                                      LocatorAndRange('c', 5, 0, 5)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 31), [LocatorAndRange('a', 10, 0, 10),
+                                                                      LocatorAndRange('b', 15, 0, 15),
+                                                                      LocatorAndRange('c', 5, 0, 5)])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 15, 5), [LocatorAndRange('b', 15, 5, 5)])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 8, 17), [LocatorAndRange('a', 10, 8, 2),
+                                                                      LocatorAndRange('b', 15, 0, 15)])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 8, 20), [LocatorAndRange('a', 10, 8, 2),
+                                                                      LocatorAndRange('b', 15, 0, 15),
+                                                                      LocatorAndRange('c', 5, 0, 3)])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 26, 2), [LocatorAndRange('c', 5, 1, 2)])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 9, 15), [LocatorAndRange('a', 10, 9, 1),
+                                                                      LocatorAndRange('b', 15, 0, 14)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 10, 15), [LocatorAndRange('b', 15, 0, 15)])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 11, 15), [LocatorAndRange('b', 15, 1, 14),
+                                                                       LocatorAndRange('c', 5, 0, 1)])
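+
+The two classes take their fields in different orders, which is easy to misread in these assertions: Range is (locator, range_start, range_size) while LocatorAndRange is (locator, block_size, segment_offset, segment_size). A sketch with namedtuples standing in for arvados.ranges:
+
+    from collections import namedtuple
+
+    Range = namedtuple('Range', ['locator', 'range_start', 'range_size'])
+    LocatorAndRange = namedtuple(
+        'LocatorAndRange',
+        ['locator', 'block_size', 'segment_offset', 'segment_size'])
+
+    blocks = [Range('a', 0, 10), Range('b', 10, 15)]
+    # Reading 4 bytes at offset 8 spans both blocks:
+    # [LocatorAndRange('a', 10, 8, 2), LocatorAndRange('b', 15, 0, 2)]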
 
     class MockKeep(object):
         def __init__(self, content, num_retries=0):
@@ -707,23 +710,26 @@ class CollectionWriterTestCase(unittest.TestCase, CollectionTestMixin):
 
     def test_write_insufficient_replicas_via_disks(self):
         client = mock.MagicMock(name='api_client')
-        self.mock_keep_services(client, status=200, service_type='disk', count=2)
-        writer = self.foo_writer(api_client=client, replication=3)
         with self.mock_keep(
                 None, 200, 200,
                 **{'x-keep-replicas-stored': 1}) as keepmock:
+            self.mock_keep_services(client, status=200, service_type='disk', count=2)
+            writer = self.foo_writer(api_client=client, replication=3)
             with self.assertRaises(arvados.errors.KeepWriteError):
                 writer.manifest_text()
 
     def test_write_three_replicas(self):
         client = mock.MagicMock(name='api_client')
-        self.mock_keep_services(client, status=200, service_type='disk', count=6)
-        writer = self.foo_writer(api_client=client, replication=3)
         with self.mock_keep(
                 None, 500, 500, 500, 200, 200, 200,
                 **{'x-keep-replicas-stored': 1}) as keepmock:
+            self.mock_keep_services(client, status=200, service_type='disk', count=6)
+            writer = self.foo_writer(api_client=client, replication=3)
             writer.manifest_text()
-            self.assertEqual(6, keepmock.call_count)
+            # keepmock is the mock session constructor; keepmock.return_value
+            # is the mock session object, and keepmock.return_value.put is the
+            # actual mock method of interest.
+            self.assertEqual(6, keepmock.return_value.put.call_count)
 
     def test_write_whole_collection_through_retries(self):
         writer = self.foo_writer(num_retries=2)
@@ -764,15 +770,14 @@ class CollectionWriterTestCase(unittest.TestCase, CollectionTestMixin):
 
     def test_open_flush(self):
         client = self.api_client_mock()
-        writer = arvados.CollectionWriter(client)
-        with writer.open('flush_test') as out_file:
-            out_file.write('flush1')
-            data_loc1 = hashlib.md5('flush1').hexdigest() + '+6'
-            with self.mock_keep(data_loc1, 200) as keep_mock:
+        data_loc1 = hashlib.md5('flush1').hexdigest() + '+6'
+        data_loc2 = hashlib.md5('flush2').hexdigest() + '+6'
+        with self.mock_keep((data_loc1, 200), (data_loc2, 200)) as keep_mock:
+            writer = arvados.CollectionWriter(client)
+            with writer.open('flush_test') as out_file:
+                out_file.write('flush1')
                 out_file.flush()
-            out_file.write('flush2')
-            data_loc2 = hashlib.md5('flush2').hexdigest() + '+6'
-        with self.mock_keep(data_loc2, 200) as keep_mock:
+                out_file.write('flush2')
             self.assertEqual(". {} {} 0:12:flush_test\n".format(data_loc1,
                                                                 data_loc2),
                              writer.manifest_text())
@@ -791,15 +796,14 @@ class CollectionWriterTestCase(unittest.TestCase, CollectionTestMixin):
 
     def test_two_opens_two_streams(self):
         client = self.api_client_mock()
-        writer = arvados.CollectionWriter(client)
-        with writer.open('file') as out_file:
-            out_file.write('file')
-            data_loc1 = hashlib.md5('file').hexdigest() + '+4'
-        with self.mock_keep(data_loc1, 200) as keep_mock:
+        data_loc1 = hashlib.md5('file').hexdigest() + '+4'
+        data_loc2 = hashlib.md5('indir').hexdigest() + '+5'
+        with self.mock_keep((data_loc1, 200), (data_loc2, 200)) as keep_mock:
+            writer = arvados.CollectionWriter(client)
+            with writer.open('file') as out_file:
+                out_file.write('file')
             with writer.open('./dir', 'indir') as out_file:
                 out_file.write('indir')
-                data_loc2 = hashlib.md5('indir').hexdigest() + '+5'
-        with self.mock_keep(data_loc2, 200) as keep_mock:
             expected = ". {} 0:4:file\n./dir {} 0:5:indir\n".format(
                 data_loc1, data_loc2)
             self.assertEqual(expected, writer.manifest_text())
@@ -811,5 +815,326 @@ class CollectionWriterTestCase(unittest.TestCase, CollectionTestMixin):
         self.assertRaises(arvados.errors.AssertionError, writer.open, 'two')
 
 
+class NewCollectionTestCase(unittest.TestCase, CollectionTestMixin):
+    def test_import_export_manifest(self):
+        m1 = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
+. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
+"""
+        self.assertEqual(". 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 0:127:md5sum.txt\n", export_manifest(import_manifest(m1)))
+
+    def test_init_manifest(self):
+        m1 = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
+. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
+"""
+        self.assertEqual(". 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 0:127:md5sum.txt\n", export_manifest(ReadOnlyCollection(m1)))
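+
The two tests above pin down manifest normalization: re-exporting merges the three single-block streams into one stream line, listing all three blocks once and coalescing the adjacent md5sum.txt segments into a single 0:127 segment (43 + 41 + 43 = 127). A round-trip sketch, assuming import_manifest and export_manifest come from arvados.collection as this branch's tests suggest:

    from arvados.collection import import_manifest, export_manifest

    m = (". 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt\n"
         ". 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt\n")
    # prints one normalized stream with both blocks and a merged
    # 0:84:md5sum.txt segment (43 + 41 = 84)
    print export_manifest(import_manifest(m))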
+
+    def test_remove(self):
+        c = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n')
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n", export_manifest(c))
+        self.assertIn("count1.txt", c)
+        c.remove("count1.txt")
+        self.assertNotIn("count1.txt", c)
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n", export_manifest(c))
+
+    def test_remove_in_subdir(self):
+        c = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+        c.remove("foo/count2.txt")
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n", export_manifest(c))
+
+    def test_remove_empty_subdir(self):
+        c = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+        c.remove("foo/count2.txt")
+        c.remove("foo")
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n", export_manifest(c))
+
+    def test_remove_nonempty_subdir(self):
+        c = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+        with self.assertRaises(IOError):
+            c.remove("foo")
+        c.remove("foo", recursive=True)
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n", export_manifest(c))
+
+    def test_copy_to_dir1(self):
+        c = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c.copy("count1.txt", "foo/count2.txt")
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n", export_manifest(c))
+
+    def test_copy_to_dir2(self):
+        c = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c.copy("count1.txt", "foo")
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n", export_manifest(c))
+
+    def test_copy_to_dir3(self):
+        c = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c.copy("count1.txt", "foo/")
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n", export_manifest(c))
+
+    def test_copy_file(self):
+        c = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c.copy("count1.txt", "count2.txt")
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n", c.manifest_text())
+
+    def test_clone(self):
+        c = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+        cl = c.clone()
+        self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n", export_manifest(cl))
+
+    def test_diff1(self):
+        c1 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c2 = WritableCollection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\n')
+        d = c2.diff(c1)
+        self.assertEqual(d, [('del', './count2.txt', c2["count2.txt"]),
+                             ('add', './count1.txt', c1["count1.txt"])])
+        d = c1.diff(c2)
+        self.assertEqual(d, [('del', './count1.txt', c1["count1.txt"]),
+                             ('add', './count2.txt', c2["count2.txt"])])
+        self.assertNotEqual(c1.manifest_text(), c2.manifest_text())
+        c1.apply(d)
+        self.assertEqual(c1.manifest_text(), c2.manifest_text())
+
+    def test_diff2(self):
+        c1 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c2 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        d = c2.diff(c1)
+        self.assertEqual(d, [])
+        d = c1.diff(c2)
+        self.assertEqual(d, [])
+
+        self.assertEqual(c1.manifest_text(), c2.manifest_text())
+        c1.apply(d)
+        self.assertEqual(c1.manifest_text(), c2.manifest_text())
+
+    def test_diff3(self):
+        c1 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c2 = WritableCollection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt\n')
+        d = c2.diff(c1)
+        self.assertEqual(d, [('mod', './count1.txt', c2["count1.txt"], c1["count1.txt"])])
+        d = c1.diff(c2)
+        self.assertEqual(d, [('mod', './count1.txt', c1["count1.txt"], c2["count1.txt"])])
+
+        self.assertNotEqual(c1.manifest_text(), c2.manifest_text())
+        c1.apply(d)
+        self.assertEqual(c1.manifest_text(), c2.manifest_text())
+
+    def test_diff4(self):
+        c1 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c2 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt 10:20:count2.txt\n')
+        d = c2.diff(c1)
+        self.assertEqual(d, [('del', './count2.txt', c2["count2.txt"])])
+        d = c1.diff(c2)
+        self.assertEqual(d, [('add', './count2.txt', c2["count2.txt"])])
+
+        self.assertNotEqual(c1.manifest_text(), c2.manifest_text())
+        c1.apply(d)
+        self.assertEqual(c1.manifest_text(), c2.manifest_text())
+
+    def test_diff5(self):
+        c1 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c2 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\n')
+        d = c2.diff(c1)
+        self.assertEqual(d, [('del', './foo', c2["foo"])])
+        d = c1.diff(c2)
+        self.assertEqual(d, [('add', './foo', c2["foo"])])
+
+        self.assertNotEqual(c1.manifest_text(), c2.manifest_text())
+        c1.apply(d)
+        self.assertEqual(c1.manifest_text(), c2.manifest_text())
+
+    def test_diff6(self):
+        c1 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\n')
+        c2 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:3:count3.txt\n')
+
+        d = c2.diff(c1)
+        self.assertEqual(d, [('del', './foo/count3.txt', c2.find("foo/count3.txt")),
+                             ('add', './foo/count2.txt', c1.find("foo/count2.txt"))])
+        d = c1.diff(c2)
+        self.assertEqual(d, [('del', './foo/count2.txt', c1.find("foo/count2.txt")),
+                             ('add', './foo/count3.txt', c2.find("foo/count3.txt"))])
+
+        self.assertNotEqual(c1.manifest_text(), c2.manifest_text())
+        c1.apply(d)
+        self.assertEqual(c1.manifest_text(), c2.manifest_text())
+
+    def test_diff7(self):
+        c1 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\n')
+        c2 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:3:foo\n')
+        d = c2.diff(c1)
+        self.assertEqual(d, [('mod', './foo', c2["foo"], c1["foo"])])
+        d = c1.diff(c2)
+        self.assertEqual(d, [('mod', './foo', c1["foo"], c2["foo"])])
+
+        self.assertNotEqual(c1.manifest_text(), c2.manifest_text())
+        c1.apply(d)
+        self.assertEqual(c1.manifest_text(), c2.manifest_text())
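Taken together, test_diff1 through test_diff7 establish the change-list vocabulary and the invariant that a.apply(a.diff(b)) makes a match b. A compact summary, inferred from the assertions above:

    # ('add', path, new_item)            - path exists only in the other collection
    # ('del', path, old_item)            - path exists only in self
    # ('mod', path, old_item, new_item)  - path exists in both but differs
    c1 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:a.txt\n')
    c2 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:b.txt\n')
    c1.apply(c1.diff(c2))   # c1 now renders the same manifest as c2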
+
+    def test_conflict1(self):
+        c1 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+        c2 = WritableCollection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\n')
+        d = c1.diff(c2)
+        self.assertEqual(d, [('del', './count1.txt', c1["count1.txt"]),
+                             ('add', './count2.txt', c2["count2.txt"])])
+        with c1.open("count1.txt", "w") as f:
+            f.write("zzzzz")
+
+        # count1.txt changed in c1 after the diff was computed, so apply()
+        # keeps the modified file rather than deleting it.
+        c1.apply(d)
+        self.assertEqual(c1.manifest_text(), ". 95ebc3c7b3b9f1d2c40fec14415d3cb8+5 5348b82a029fd9e971a811ce1f71360b+43 0:5:count1.txt 5:10:count2.txt\n")
+
+    def test_conflict2(self):
+        c1 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt')
+        c2 = WritableCollection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt')
+        d = c1.diff(c2)
+        self.assertEqual(d, [('mod', './count1.txt', c1["count1.txt"], c2["count1.txt"])])
+        with c1.open("count1.txt", "w") as f:
+            f.write("zzzzz")
+
+        # c1 changed, so c2 mod will go to a conflict file
+        c1.apply(d)
+        self.assertTrue(re.match(r"\. 95ebc3c7b3b9f1d2c40fec14415d3cb8\+5 5348b82a029fd9e971a811ce1f71360b\+43 0:5:count1.txt 5:10:count1.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$",
+                                 c1.manifest_text()))
+
+    def test_conflict3(self):
+        c1 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+        c2 = WritableCollection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt\n')
+        d = c1.diff(c2)
+        self.assertEqual(d, [('del', './count2.txt', c1["count2.txt"]),
+                             ('add', './count1.txt', c2["count1.txt"])])
+        with c1.open("count1.txt", "w") as f:
+            f.write("zzzzz")
+
+        # c1 added count1.txt, so c2 add will go to a conflict file
+        c1.apply(d)
+        self.assertTrue(re.match(r"\. 95ebc3c7b3b9f1d2c40fec14415d3cb8\+5 5348b82a029fd9e971a811ce1f71360b\+43 0:5:count1.txt 5:10:count1.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$",
+                                 c1.manifest_text()))
+
+    def test_conflict4(self):
+        c1 = WritableCollection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt')
+        c2 = WritableCollection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt')
+        d = c1.diff(c2)
+        self.assertEqual(d, [('mod', './count1.txt', c1["count1.txt"], c2["count1.txt"])])
+        c1.remove("count1.txt")
+
+        # c1 deleted count1.txt, so the mod from c2 goes to a conflict file
+        c1.apply(d)
+        self.assertTrue(re.match(r"\. 5348b82a029fd9e971a811ce1f71360b\+43 0:10:count1.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$",
+                                 c1.manifest_text()))
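+
The conflict regexes above fix the naming scheme: the losing copy is renamed <name>~conflict-<timestamp>~. A sketch of how such a name could be generated (the actual SDK helper is an assumption):

    import time

    def conflict_name(name):
        # produces names matching the pattern asserted in the tests:
        # <name>~conflict-YYYY-MM-DD-HH:MM:SS~
        return "%s~conflict-%s~" % (name, time.strftime("%Y-%m-%d-%H:%M:%S"))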
+
+    def test_notify1(self):
+        c1 = WritableCollection()
+        events = []
+        c1.subscribe(lambda event, collection, name, item: events.append((event, collection, name, item)))
+        f = c1.open("foo.txt", "w")
+        self.assertEqual(events[0], (arvados.collection.ADD, c1, "foo.txt", f.arvadosfile))
+
+    def test_open_w(self):
+        c1 = WritableCollection(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n")
+        self.assertEqual(c1["count1.txt"].size(), 10)
+        c1.open("count1.txt", "w").close()
+        self.assertEqual(c1["count1.txt"].size(), 0)
+
+
+class CollectionCreateUpdateTest(run_test_server.TestCaseWithServers):
+    MAIN_SERVER = {}
+    KEEP_SERVER = {}
+
+    def test_create_and_save(self):
+        c = arvados.collection.createWritableCollection("hello world")
+        self.assertEqual(c.portable_data_hash(), "d41d8cd98f00b204e9800998ecf8427e+0")
+        self.assertEqual(c.api_response()["portable_data_hash"], "d41d8cd98f00b204e9800998ecf8427e+0")
+
+        with c.open("count.txt", "w") as f:
+            f.write("0123456789")
+
+        self.assertEqual(c.manifest_text(), ". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n")
+
+        c.save()
+
+        c2 = arvados.api().collections().get(uuid=c._manifest_locator).execute()
+        self.assertTrue(re.match(r"^\. 781e5e245d69b566979b86e28d23f2c7\+10\+A[a-f0-9]{40}@[a-f0-9]{8} 0:10:count.txt$",
+                                 c2["manifest_text"]))
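+
For reference, the regex above decomposes a signed Keep locator; the pieces it pins down are:

    # 781e5e245d69b566979b86e28d23f2c7   md5 of the block contents
    # +10                                block size in bytes
    # +A<40 hex>@<8 hex>                 permission hint: HMAC signature
    #                                    plus expiry as a hex Unix timestamp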
+
+    def test_create_diff_apply(self):
+        c1 = arvados.collection.createWritableCollection("hello world")
+        self.assertEqual(c1.portable_data_hash(), "d41d8cd98f00b204e9800998ecf8427e+0")
+        self.assertEqual(c1.api_response()["portable_data_hash"], "d41d8cd98f00b204e9800998ecf8427e+0")
+        with c1.open("count.txt", "w") as f:
+            f.write("0123456789")
+
+        self.assertEqual(c1.manifest_text(), ". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n")
+
+        c1.save()
+
+        c2 = WritableCollection(c1._manifest_locator)
+        with c2.open("count.txt", "w") as f:
+            f.write("abcdefg")
+
+        diff = c1.diff(c2)
+
+        self.assertEqual(diff[0], (arvados.collection.MOD, u'./count.txt', c1["count.txt"], c2["count.txt"]))
+
+        c1.apply(diff)
+        self.assertEqual(c1.portable_data_hash(), c2.portable_data_hash())
+
+    def test_diff_apply_with_token(self):
+        baseline = ReadOnlyCollection(". 781e5e245d69b566979b86e28d23f2c7+10+A715fd31f8111894f717eb1003c1b0216799dd9ec@54f5dd1a 0:10:count.txt\n")
+        c = WritableCollection(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n")
+        other = ReadOnlyCollection(". 7ac66c0f148de9519b8bd264312c4d64+7+A715fd31f8111894f717eb1003c1b0216799dd9ec@54f5dd1a 0:7:count.txt\n")
+
+        diff = baseline.diff(other)
+        self.assertEqual(diff, [('mod', u'./count.txt', c["count.txt"], other["count.txt"])])
+
+        c.apply(diff)
+
+        self.assertEqual(c.manifest_text(), ". 7ac66c0f148de9519b8bd264312c4d64+7+A715fd31f8111894f717eb1003c1b0216799dd9ec@54f5dd1a 0:7:count.txt\n")
+
+    def test_create_and_update(self):
+        c1 = arvados.collection.createWritableCollection("hello world")
+        self.assertEqual(c1.portable_data_hash(), "d41d8cd98f00b204e9800998ecf8427e+0")
+        self.assertEqual(c1.api_response()["portable_data_hash"], "d41d8cd98f00b204e9800998ecf8427e+0")
+        with c1.open("count.txt", "w") as f:
+            f.write("0123456789")
+
+        self.assertEqual(c1.manifest_text(), ". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n")
+
+        c1.save()
+
+        c2 = arvados.collection.WritableCollection(c1._manifest_locator)
+        with c2.open("count.txt", "w") as f:
+            f.write("abcdefg")
+
+        c2.save()
+
+        self.assertNotEqual(c1.portable_data_hash(), c2.portable_data_hash())
+        c1.update()
+        self.assertEqual(c1.portable_data_hash(), c2.portable_data_hash())
+
+    def test_create_and_update_with_conflict(self):
+        c1 = arvados.collection.createWritableCollection("hello world")
+        self.assertEqual(c1.portable_data_hash(), "d41d8cd98f00b204e9800998ecf8427e+0")
+        self.assertEqual(c1.api_response()["portable_data_hash"], "d41d8cd98f00b204e9800998ecf8427e+0")
+        with c1.open("count.txt", "w") as f:
+            f.write("0123456789")
+
+        self.assertEqual(c1.manifest_text(), ". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n")
+
+        c1.save()
+        with c1.open("count.txt", "w") as f:
+            f.write("XYZ")
+
+        c2 = arvados.collection.WritableCollection(c1._manifest_locator)
+        with c2.open("count.txt", "w") as f:
+            f.write("abcdefg")
+
+        c2.save()
+
+        c1.update()
+        self.assertTrue(re.match(r"\. e65075d550f9b5bf9992fa1d71a131be\+3 7ac66c0f148de9519b8bd264312c4d64\+7\+A[a-f0-9]{40}@[a-f0-9]{8} 0:3:count.txt 3:7:count.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$", c1.manifest_text()))
+
+
 if __name__ == '__main__':
     unittest.main()
index 6d4d3cd8b816ea3854151285cd46cdebf60f5e63..1baf1357ccf8dbfdc78960d4a63b326595e249c5 100644 (file)
@@ -258,57 +258,57 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
 
     def test_get_timeout(self):
         api_client = self.mock_keep_services(count=1)
-        keep_client = arvados.KeepClient(api_client=api_client)
         force_timeout = [socket.timeout("timed out")]
-        with mock.patch('requests.get', side_effect=force_timeout) as mock_request:
+        with tutil.mock_get(force_timeout) as mock_session:
+            keep_client = arvados.KeepClient(api_client=api_client)
             with self.assertRaises(arvados.errors.KeepReadError):
                 keep_client.get('ffffffffffffffffffffffffffffffff')
-            self.assertTrue(mock_request.called)
+            self.assertTrue(mock_session.return_value.get.called)
             self.assertEqual(
                 arvados.KeepClient.DEFAULT_TIMEOUT,
-                mock_request.call_args[1]['timeout'])
+                mock_session.return_value.get.call_args[1]['timeout'])
 
     def test_put_timeout(self):
         api_client = self.mock_keep_services(count=1)
-        keep_client = arvados.KeepClient(api_client=api_client)
         force_timeout = [socket.timeout("timed out")]
-        with mock.patch('requests.put', side_effect=force_timeout) as mock_request:
+        with tutil.mock_put(force_timeout) as mock_session:
+            keep_client = arvados.KeepClient(api_client=api_client)
             with self.assertRaises(arvados.errors.KeepWriteError):
                 keep_client.put('foo')
-            self.assertTrue(mock_request.called)
+            self.assertTrue(mock_session.return_value.put.called)
             self.assertEqual(
                 arvados.KeepClient.DEFAULT_TIMEOUT,
-                mock_request.call_args[1]['timeout'])
+                mock_session.return_value.put.call_args[1]['timeout'])
 
     def test_proxy_get_timeout(self):
         # Force a timeout, verifying that the mock session's get or
         # put method was called with the proxy_timeout setting
         # rather than the default timeout.
         api_client = self.mock_keep_services(service_type='proxy', count=1)
-        keep_client = arvados.KeepClient(api_client=api_client)
         force_timeout = [socket.timeout("timed out")]
-        with mock.patch('requests.get', side_effect=force_timeout) as mock_request:
+        with tutil.mock_get(force_timeout) as mock_session:
+            keep_client = arvados.KeepClient(api_client=api_client)
             with self.assertRaises(arvados.errors.KeepReadError):
                 keep_client.get('ffffffffffffffffffffffffffffffff')
-            self.assertTrue(mock_request.called)
+            self.assertTrue(mock_session.return_value.get.called)
             self.assertEqual(
                 arvados.KeepClient.DEFAULT_PROXY_TIMEOUT,
-                mock_request.call_args[1]['timeout'])
+                mock_session.return_value.get.call_args[1]['timeout'])
 
     def test_proxy_put_timeout(self):
         # Force a timeout, verifying that the mock session's get or
         # put method was called with the proxy_timeout setting
         # rather than the default timeout.
         api_client = self.mock_keep_services(service_type='proxy', count=1)
-        keep_client = arvados.KeepClient(api_client=api_client)
         force_timeout = [socket.timeout("timed out")]
-        with mock.patch('requests.put', side_effect=force_timeout) as mock_request:
+        with tutil.mock_put(force_timeout) as mock_session:
+            keep_client = arvados.KeepClient(api_client=api_client)
             with self.assertRaises(arvados.errors.KeepWriteError):
                 keep_client.put('foo')
-            self.assertTrue(mock_request.called)
+            self.assertTrue(mock_session.return_value.put.called)
             self.assertEqual(
                 arvados.KeepClient.DEFAULT_PROXY_TIMEOUT,
-                mock_request.call_args[1]['timeout'])
+                mock_session.return_value.put.call_args[1]['timeout'])
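+
The pattern in every hunk above is the same: construct the KeepClient inside the with-block so it builds its HTTP session after the patch is active. A plausible sketch of the tutil.mock_get helper under that assumption (the real arvados_testutil code may differ):

    import contextlib
    import mock

    @contextlib.contextmanager
    def mock_get(side_effects):
        # patch the session class; the yielded mock is the constructor,
        # so tests inspect mock_session.return_value.get
        with mock.patch('requests.Session') as mock_session:
            mock_session.return_value.get.side_effect = side_effects
            yield mock_session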
 
     def test_probe_order_reference_set(self):
         # expected_order[i] is the probe order for
@@ -414,11 +414,11 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
 
     def check_errors_from_last_retry(self, verb, exc_class):
         api_client = self.mock_keep_services(count=2)
-        keep_client = arvados.KeepClient(api_client=api_client)
         req_mock = getattr(tutil, 'mock_{}_responses'.format(verb))(
             "retry error reporting test", 500, 500, 403, 403)
         with req_mock, tutil.skip_sleep, \
                 self.assertRaises(exc_class) as err_check:
+            keep_client = arvados.KeepClient(api_client=api_client)
             getattr(keep_client, verb)('d41d8cd98f00b204e9800998ecf8427e+0',
                                        num_retries=3)
         self.assertEqual([403, 403], [
@@ -435,9 +435,9 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
         data = 'partial failure test'
         data_loc = '{}+{}'.format(hashlib.md5(data).hexdigest(), len(data))
         api_client = self.mock_keep_services(count=3)
-        keep_client = arvados.KeepClient(api_client=api_client)
         with tutil.mock_put_responses(data_loc, 200, 500, 500) as req_mock, \
                 self.assertRaises(arvados.errors.KeepWriteError) as exc_check:
+            keep_client = arvados.KeepClient(api_client=api_client)
             keep_client.put(data)
         self.assertEqual(2, len(exc_check.exception.service_errors()))
 
@@ -541,17 +541,13 @@ class KeepClientRetryGetTestCase(KeepClientRetryTestMixin, unittest.TestCase):
             self.check_success(locator=self.HINTED_LOCATOR)
 
     def test_try_next_server_after_timeout(self):
-        side_effects = [
-            socket.timeout("timed out"),
-            tutil.fake_requests_response(200, self.DEFAULT_EXPECT)]
-        with mock.patch('requests.get',
-                        side_effect=iter(side_effects)):
+        with tutil.mock_get([
+                socket.timeout("timed out"),
+                tutil.fake_requests_response(200, self.DEFAULT_EXPECT)]):
             self.check_success(locator=self.HINTED_LOCATOR)
 
     def test_retry_data_with_wrong_checksum(self):
-        side_effects = (tutil.fake_requests_response(200, s)
-                        for s in ['baddata', self.TEST_DATA])
-        with mock.patch('requests.get', side_effect=side_effects):
+        with tutil.mock_get((tutil.fake_requests_response(200, s) for s in ['baddata', self.TEST_DATA])):
             self.check_success(locator=self.HINTED_LOCATOR)
 
 
index 08a3d28a5d11d79c0752a6030bbc54c5db66b0a5..11ee69493c771d84012a2348b8ffb1eab7b483e5 100644 (file)
@@ -6,9 +6,10 @@ import io
 import mock
 import os
 import unittest
+import hashlib
 
 import arvados
-from arvados import StreamReader, StreamFileReader
+from arvados import StreamReader, StreamFileReader, Range
 
 import arvados_testutil as tutil
 import run_test_server
@@ -16,7 +17,7 @@ import run_test_server
 class StreamFileReaderTestCase(unittest.TestCase):
     def make_count_reader(self):
         stream = tutil.MockStreamReader('.', '01234', '34567', '67890')
-        return StreamFileReader(stream, [[1, 3, 0], [6, 3, 3], [11, 3, 6]],
+        return StreamFileReader(stream, [Range(1, 0, 3), Range(6, 3, 3), Range(11, 6, 3)],
                                 'count.txt')
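Comparing the old positional triples with the new calls — [1, 3, 0] becomes Range(1, 0, 3) — suggests the old form was (offset, size, start) while Range takes (locator_or_offset, range_start, range_size). A hypothetical sketch of the type from the new arvados.ranges module:

    class Range(object):
        def __init__(self, locator, range_start, range_size, segment_offset=0):
            self.locator = locator          # block locator, or byte offset into the stream here
            self.range_start = range_start  # where this range begins in the file
            self.range_size = range_size    # number of bytes in the range
            self.segment_offset = segment_offset  # assumed optional field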
 
     def test_read_returns_first_block(self):
@@ -102,7 +103,7 @@ class StreamFileReaderTestCase(unittest.TestCase):
 
     def make_newlines_reader(self):
         stream = tutil.MockStreamReader('.', 'one\ntwo\n\nth', 'ree\nfour\n\n')
-        return StreamFileReader(stream, [[0, 11, 0], [11, 10, 11]], 'count.txt')
+        return StreamFileReader(stream, [Range(0, 0, 11), Range(11, 11, 10)], 'count.txt')
 
     def check_lines(self, actual):
         self.assertEqual(['one\n', 'two\n', '\n', 'three\n', 'four\n', '\n'],
@@ -140,7 +141,7 @@ class StreamFileReaderTestCase(unittest.TestCase):
     def test_name_attribute(self):
         # Test both .name and .name() (for backward compatibility)
         stream = tutil.MockStreamReader()
-        sfile = StreamFileReader(stream, [[0, 0, 0]], 'nametest')
+        sfile = StreamFileReader(stream, [Range(0, 0, 0)], 'nametest')
         self.assertEqual('nametest', sfile.name)
         self.assertEqual('nametest', sfile.name())
 
@@ -148,7 +149,7 @@ class StreamFileReaderTestCase(unittest.TestCase):
         test_text = 'decompression\ntest\n'
         test_data = compress_func(test_text)
         stream = tutil.MockStreamReader('.', test_data)
-        reader = StreamFileReader(stream, [[0, len(test_data), 0]],
+        reader = StreamFileReader(stream, [Range(0, 0, len(test_data))],
                                   'test.' + compress_ext)
         self.assertEqual(test_text, ''.join(reader.readall_decompressed()))
 
@@ -183,48 +184,48 @@ class StreamRetryTestMixin(object):
 
     @tutil.skip_sleep
     def test_success_without_retries(self):
-        reader = self.reader_for('bar_file')
         with tutil.mock_get_responses('bar', 200):
+            reader = self.reader_for('bar_file')
             self.assertEqual('bar', self.read_for_test(reader, 3))
 
     @tutil.skip_sleep
     def test_read_no_default_retry(self):
-        reader = self.reader_for('user_agreement')
         with tutil.mock_get_responses('', 500):
+            reader = self.reader_for('user_agreement')
             with self.assertRaises(arvados.errors.KeepReadError):
                 self.read_for_test(reader, 10)
 
     @tutil.skip_sleep
     def test_read_with_instance_retries(self):
-        reader = self.reader_for('foo_file', num_retries=3)
         with tutil.mock_get_responses('foo', 500, 200):
+            reader = self.reader_for('foo_file', num_retries=3)
             self.assertEqual('foo', self.read_for_test(reader, 3))
 
     @tutil.skip_sleep
     def test_read_with_method_retries(self):
-        reader = self.reader_for('foo_file')
         with tutil.mock_get_responses('foo', 500, 200):
+            reader = self.reader_for('foo_file')
             self.assertEqual('foo',
                              self.read_for_test(reader, 3, num_retries=3))
 
     @tutil.skip_sleep
     def test_read_instance_retries_exhausted(self):
-        reader = self.reader_for('bar_file', num_retries=3)
         with tutil.mock_get_responses('bar', 500, 500, 500, 500, 200):
+            reader = self.reader_for('bar_file', num_retries=3)
             with self.assertRaises(arvados.errors.KeepReadError):
                 self.read_for_test(reader, 3)
 
     @tutil.skip_sleep
     def test_read_method_retries_exhausted(self):
-        reader = self.reader_for('bar_file')
         with tutil.mock_get_responses('bar', 500, 500, 500, 500, 200):
+            reader = self.reader_for('bar_file')
             with self.assertRaises(arvados.errors.KeepReadError):
                 self.read_for_test(reader, 3, num_retries=3)
 
     @tutil.skip_sleep
     def test_method_retries_take_precedence(self):
-        reader = self.reader_for('user_agreement', num_retries=10)
         with tutil.mock_get_responses('', 500, 500, 500, 200):
+            reader = self.reader_for('user_agreement', num_retries=10)
             with self.assertRaises(arvados.errors.KeepReadError):
                 self.read_for_test(reader, 10, num_retries=1)
 
@@ -272,6 +273,5 @@ class StreamFileReadlinesTestCase(StreamFileReadTestCase):
     def read_for_test(self, reader, byte_count, **kwargs):
         return ''.join(reader.readlines(**kwargs))
 
-
 if __name__ == '__main__':
     unittest.main()
index 71c4ee5a2c4b713aba669a51605f01736b02bfe0..6850de66cc111982d3048068df32a4171bb6c1e5 100644 (file)
@@ -30,40 +30,6 @@ _logger = logging.getLogger('arvados.arvados_fuse')
 # appear as underscores in the fuse mount.)
 _disallowed_filename_characters = re.compile('[\x00/]')
 
-class SafeApi(object):
-    """Threadsafe wrapper for API object.
-
-    This stores and returns a different api object per thread, because
-    httplib2 which underlies apiclient is not threadsafe.
-    """
-
-    def __init__(self, config):
-        self.host = config.get('ARVADOS_API_HOST')
-        self.api_token = config.get('ARVADOS_API_TOKEN')
-        self.insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE')
-        self.local = threading.local()
-        self.block_cache = arvados.KeepBlockCache()
-
-    def localapi(self):
-        if 'api' not in self.local.__dict__:
-            self.local.api = arvados.api(
-                version='v1',
-                host=self.host, token=self.api_token, insecure=self.insecure)
-        return self.local.api
-
-    def localkeep(self):
-        if 'keep' not in self.local.__dict__:
-            self.local.keep = arvados.KeepClient(api_client=self.localapi(), block_cache=self.block_cache)
-        return self.local.keep
-
-    def __getattr__(self, name):
-        # Proxy nonexistent attributes to the local API client.
-        try:
-            return getattr(self.localapi(), name)
-        except AttributeError:
-            return super(SafeApi, self).__getattr__(name)
-
-
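The removed SafeApi class is superseded by arvados.safeapi.ThreadSafeApiCache, imported in the arv-mount change below. A minimal sketch of the same thread-local pattern under the new name, assuming the interface carries over unchanged:

    import threading
    import arvados

    class ThreadSafeApiCache(object):
        """One API client per thread: httplib2, which backs apiclient,
        is not threadsafe, but a single KeepBlockCache can be shared."""

        def __init__(self, config):
            self.host = config.get('ARVADOS_API_HOST')
            self.api_token = config.get('ARVADOS_API_TOKEN')
            self.insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE')
            self.local = threading.local()
            self.block_cache = arvados.KeepBlockCache()

        def localapi(self):
            if 'api' not in self.local.__dict__:
                self.local.api = arvados.api(
                    version='v1', host=self.host,
                    token=self.api_token, insecure=self.insecure)
            return self.local.api

        def localkeep(self):
            if 'keep' not in self.local.__dict__:
                self.local.keep = arvados.KeepClient(
                    api_client=self.localapi(), block_cache=self.block_cache)
            return self.local.keep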
 def convertTime(t):
     """Parse Arvados timestamp to unix time."""
     if not t:
@@ -600,11 +566,6 @@ class ProjectDirectory(Directory):
 
             contents = arvados.util.list_all(self.api.groups().contents,
                                              self.num_retries, uuid=self.uuid)
-            # Name links will be obsolete soon, take this out when there are no more pre-#3036 in use.
-            contents += arvados.util.list_all(
-                self.api.links().list, self.num_retries,
-                filters=[['tail_uuid', '=', self.uuid],
-                         ['link_class', '=', 'name']])
 
         # end with llfuse.lock_released, re-acquire lock
 
@@ -934,5 +895,5 @@ class Operations(llfuse.Operations):
     # arv-mount.
     # The workaround is to implement it with the proper number of parameters,
     # and then everything works out.
-    def create(self, p1, p2, p3, p4, p5):
+    def create(self, inode_parent, name, mode, flags, ctx):
         raise llfuse.FUSEError(errno.EROFS)
index 68cd09c1e83fa443978a11650d505d5aa519bc60..ca65133ce57be7ac97ff9cab4f005c5fe158c959 100755 (executable)
@@ -11,6 +11,7 @@ import time
 
 import arvados.commands._util as arv_cmd
 from arvados_fuse import *
+from arvados.safeapi import ThreadSafeApiCache
 
 logger = logging.getLogger('arvados.arv-mount')
 
@@ -81,7 +82,7 @@ with "--".
     try:
         # Create the request handler
         operations = Operations(os.getuid(), os.getgid(), args.encoding)
-        api = SafeApi(arvados.config)
+        api = ThreadSafeApiCache(arvados.config)
 
         usr = api.users().current().execute(num_retries=args.retries)
         now = time.time()