Merge branch '3198-inode-cache' into 3198-writable-fuse, fix tests.
[arvados.git] / sdk / python / arvados / arvfile.py
index d9d9cd287a3b2a81ae85ec283bc06c840f8f81b2..c0ef5810728e42776d4b57c4f4e6da0b03dd4618 100644 (file)
@@ -2,24 +2,31 @@ import functools
 import os
 import zlib
 import bz2
-from ._ranges import locators_and_ranges, replace_range, Range
-from arvados.retry import retry_method
 import config
 import hashlib
-import hashlib
 import threading
 import Queue
 import copy
 import errno
+import re
+import logging
+
 from .errors import KeepWriteError, AssertionError
 from .keep import KeepLocator
-from _normalize_stream import normalize_stream
+from ._normalize_stream import normalize_stream
+from ._ranges import locators_and_ranges, LocatorAndRange, replace_range, Range
+from .retry import retry_method
+
+MOD = "mod"
+
+_logger = logging.getLogger('arvados.arvfile')
 
 def split(path):
-    """Separate the stream name and file name in a /-separated stream path and
-    return a tuple (stream_name, file_name).
+    """split(path) -> streamname, filename
 
-    If no stream name is available, assume '.'.
+    Separate the stream name and file name in a /-separated stream path and
+    return a tuple (stream_name, file_name).  If no stream name is available,
+    assume '.'.
 
     """
     try:
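# A sketch of the intended behavior (paths hypothetical):
#
#   >>> split("foo/bar/baz.txt")
#   ('foo/bar', 'baz.txt')
#   >>> split("baz.txt")          # no '/', so the stream name defaults to '.'
#   ('.', 'baz.txt')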
@@ -58,15 +65,8 @@ class _FileLikeObjectBase(object):
 
 
 class ArvadosFileReaderBase(_FileLikeObjectBase):
-    class _NameAttribute(str):
-        # The Python file API provides a plain .name attribute.
-        # Older SDK provided a name() method.
-        # This class provides both, for maximum compatibility.
-        def __call__(self):
-            return self
-
     def __init__(self, name, mode, num_retries=None):
-        super(ArvadosFileReaderBase, self).__init__(self._NameAttribute(name), mode)
+        super(ArvadosFileReaderBase, self).__init__(name, mode)
         self._filepos = 0L
         self.num_retries = num_retries
         self._readline_cache = (None, None)
@@ -82,7 +82,7 @@ class ArvadosFileReaderBase(_FileLikeObjectBase):
         return re.sub('\.(bz2|gz)$', '', self.name)
 
     @_FileLikeObjectBase._before_close
-    def seek(self, pos, whence=os.SEEK_CUR):
+    def seek(self, pos, whence=os.SEEK_SET):
         if whence == os.SEEK_CUR:
             pos += self._filepos
         elif whence == os.SEEK_END:
@@ -171,8 +171,15 @@ class ArvadosFileReaderBase(_FileLikeObjectBase):
 
 
 class StreamFileReader(ArvadosFileReaderBase):
+    class _NameAttribute(str):
+        # The Python file API provides a plain .name attribute.
+        # Older SDK provided a name() method.
+        # This class provides both, for maximum compatibility.
+        def __call__(self):
+            return self
+
     def __init__(self, stream, segments, name):
-        super(StreamFileReader, self).__init__(name, 'rb', num_retries=stream.num_retries)
+        super(StreamFileReader, self).__init__(self._NameAttribute(name), 'rb', num_retries=stream.num_retries)
         self._stream = stream
         self.segments = segments
 
@@ -194,7 +201,7 @@ class StreamFileReader(ArvadosFileReaderBase):
         available_chunks = locators_and_ranges(self.segments, self._filepos, size)
         if available_chunks:
             lr = available_chunks[0]
-            data = self._stream._readfrom(lr.locator+lr.segment_offset,
+            data = self._stream.readfrom(lr.locator+lr.segment_offset,
                                           lr.segment_size,
                                           num_retries=num_retries)
 
@@ -210,7 +217,7 @@ class StreamFileReader(ArvadosFileReaderBase):
 
         data = []
         for lr in locators_and_ranges(self.segments, start, size):
-            data.append(self._stream._readfrom(lr.locator+lr.segment_offset, lr.segment_size,
+            data.append(self._stream.readfrom(lr.locator+lr.segment_offset, lr.segment_size,
                                               num_retries=num_retries))
         return ''.join(data)
 
@@ -229,8 +236,7 @@ def synchronized(orig_func):
     return synchronized_wrapper
 
 class _BufferBlock(object):
-    """A BufferBlock is a stand-in for a Keep block that is in the process of being
-    written.
+    """A stand-in for a Keep block that is in the process of being written.
 
     Writers can append to it, get the size, and compute the Keep locator.
     There are three valid states:
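# A sketch of the state transitions described above (locator hypothetical):
#
#   bb = _BufferBlock("bufferblock0", starting_capacity=2**14, owner=None)
#   bb.append("some data")                      # allowed only while WRITABLE
#   bb.set_state(_BufferBlock.PENDING)          # upload begins; appends now fail
#   bb.set_state(_BufferBlock.COMMITTED, loc)   # loc is the permanent Keep locator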
@@ -304,7 +310,7 @@ class _BufferBlock(object):
                 self.buffer_view = None
                 self.buffer_block = None
         else:
-            raise AssertionError("Invalid state change from %s to %s" % (self.state, state))
+            raise AssertionError("Invalid state change from %s to %s" % (self.state, nextstate))
 
     @synchronized
     def state(self):
@@ -321,6 +327,14 @@ class _BufferBlock(object):
             self._locator = "%s+%i" % (hashlib.md5(self.buffer_view[0:self.write_pointer]).hexdigest(), self.size())
         return self._locator
 
+    @synchronized
+    def clone(self, new_blockid, owner):
+        if self._state == _BufferBlock.COMMITTED:
+            raise AssertionError("Can only duplicate a writable or pending buffer block")
+        bufferblock = _BufferBlock(new_blockid, self.size(), owner)
+        bufferblock.append(self.buffer_view[0:self.size()])
+        return bufferblock
+
 
 class NoopLock(object):
     def __enter__(self):
@@ -335,22 +349,21 @@ class NoopLock(object):
     def release(self):
         pass
 
-SYNC_READONLY = 1
-SYNC_EXPLICIT = 2
-SYNC_LIVE = 3
 
 def must_be_writable(orig_func):
     @functools.wraps(orig_func)
     def must_be_writable_wrapper(self, *args, **kwargs):
-        if self.sync_mode() == SYNC_READONLY:
-            raise IOError((errno.EROFS, "Collection is read only"))
+        if not self.writable():
+            raise IOError(errno.EROFS, "Collection must be writable.")
         return orig_func(self, *args, **kwargs)
     return must_be_writable_wrapper
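# Usage sketch: the decorator guards mutating methods on any object that
# exposes a writable() method (class and method names hypothetical):
#
#   class Node(object):
#       def writable(self):
#           return False
#
#       @must_be_writable
#       def rename(self, new_name):
#           pass
#
#   Node().rename("x")   # raises IOError(errno.EROFS, "Collection must be writable.")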
 
 
 class _BlockManager(object):
-    """BlockManager handles buffer blocks, background block uploads, and background
-    block prefetch for a Collection of ArvadosFiles.
+    """BlockManager handles buffer blocks.
+
+    Also handles background block uploads and background block prefetch for a
+    Collection of ArvadosFiles.
 
     """
     def __init__(self, keep):
@@ -389,8 +402,7 @@ class _BlockManager(object):
 
     @synchronized
     def dup_block(self, block, owner):
-        """Create a new bufferblock in WRITABLE state, initialized with the content of
-        an existing bufferblock.
+        """Create a new bufferblock initialized with the content of an existing bufferblock.
 
         :block:
           the buffer block to copy.
@@ -400,12 +412,7 @@ class _BlockManager(object):
 
         """
         new_blockid = "bufferblock%i" % len(self._bufferblocks)
-        with block.lock:
-            if block._state == _BufferBlock.COMMITTED:
-                raise AssertionError("Can only duplicate a writable or pending buffer block")
-
-            bufferblock = _BufferBlock(new_blockid, block.size(), owner)
-            bufferblock.append(block.buffer_view[0:block.size()])
+        bufferblock = block.clone(new_blockid, owner)
         self._bufferblocks[bufferblock.blockid] = bufferblock
         return bufferblock
 
@@ -434,11 +441,17 @@ class _BlockManager(object):
         self._prefetch_threads = None
         self._prefetch_queue = None
 
-    def commit_bufferblock(self, block):
+    def commit_bufferblock(self, block, wait):
         """Initiate a background upload of a bufferblock.
 
-        This will block if the upload queue is at capacity, otherwise it will
-        return immediately.
+        :block:
+          The block object to upload
+
+        :wait:
+          If `wait` is True, upload the block synchronously.
+          If `wait` is False, upload the block asynchronously.  This will
+          return immediately unless the upload queue is at capacity, in
+          which case it will wait for an upload queue slot.
 
         """
 
@@ -450,42 +463,50 @@ class _BlockManager(object):
                     bufferblock = self._put_queue.get()
                     if bufferblock is None:
                         return
+
                     loc = self._keep.put(bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes())
                     bufferblock.set_state(_BufferBlock.COMMITTED, loc)
 
                 except Exception as e:
-                    print e
                     self._put_errors.put((bufferblock.locator(), e))
                 finally:
                     if self._put_queue is not None:
                         self._put_queue.task_done()
 
-        with self.lock:
-            if self._put_threads is None:
-                # Start uploader threads.
-
-                # If we don't limit the Queue size, the upload queue can quickly
-                # grow to take up gigabytes of RAM if the writing process is
-                # generating data more quickly than it can be send to the Keep
-                # servers.
-                #
-                # With two upload threads and a queue size of 2, this means up to 4
-                # blocks pending.  If they are full 64 MiB blocks, that means up to
-                # 256 MiB of internal buffering, which is the same size as the
-                # default download block cache in KeepClient.
-                self._put_queue = Queue.Queue(maxsize=2)
-                self._put_errors = Queue.Queue()
-
-                self._put_threads = []
-                for i in xrange(0, self.num_put_threads):
-                    thread = threading.Thread(target=commit_bufferblock_worker, args=(self,))
-                    self._put_threads.append(thread)
-                    thread.daemon = True
-                    thread.start()
+        if block.state() != _BufferBlock.WRITABLE:
+            return
 
-        # Mark the block as PENDING so to disallow any more appends.
-        block.set_state(_BufferBlock.PENDING)
-        self._put_queue.put(block)
+        if wait:
+            block.set_state(_BufferBlock.PENDING)
+            loc = self._keep.put(block.buffer_view[0:block.write_pointer].tobytes())
+            block.set_state(_BufferBlock.COMMITTED, loc)
+        else:
+            with self.lock:
+                if self._put_threads is None:
+                    # Start uploader threads.
+
+                    # If we don't limit the Queue size, the upload queue can quickly
+                    # grow to take up gigabytes of RAM if the writing process is
+                    # generating data more quickly than it can be sent to the Keep
+                    # servers.
+                    #
+                    # With two upload threads and a queue size of 2, this means up to 4
+                    # blocks pending.  If they are full 64 MiB blocks, that means up to
+                    # 256 MiB of internal buffering, which is the same size as the
+                    # default download block cache in KeepClient.
+                    self._put_queue = Queue.Queue(maxsize=2)
+                    self._put_errors = Queue.Queue()
+
+                    self._put_threads = []
+                    for i in xrange(0, self.num_put_threads):
+                        thread = threading.Thread(target=commit_bufferblock_worker, args=(self,))
+                        self._put_threads.append(thread)
+                        thread.daemon = True
+                        thread.start()
+
+            # Mark the block as PENDING to disallow any more appends.
+            block.set_state(_BufferBlock.PENDING)
+            self._put_queue.put(block)
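# Usage sketch (names hypothetical): the two commit modes described above.
#
#   manager.commit_bufferblock(block, True)    # synchronous: returns after the Keep PUT
#   manager.commit_bufferblock(block, False)   # asynchronous: hands the block to the
#                                              # uploader threads; blocks only while
#                                              # the bounded queue is full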
 
     @synchronized
     def get_bufferblock(self, locator):
@@ -523,7 +544,7 @@ class _BlockManager(object):
 
         for k,v in items:
             if v.state() == _BufferBlock.WRITABLE:
-                self.commit_bufferblock(v)
+                v.owner.flush(False)
 
         with self.lock:
             if self._put_queue is not None:
@@ -536,7 +557,7 @@ class _BlockManager(object):
                             err.append(self._put_errors.get(False))
                     except Queue.Empty:
                         pass
-                    raise KeepWriteError("Error writing some blocks", err)
+                    raise KeepWriteError("Error writing some blocks", err, label="block")
 
     def block_prefetch(self, locator):
         """Initiate a background download of a block.
@@ -559,7 +580,7 @@ class _BlockManager(object):
                     if b is None:
                         return
                     self._keep.get(b)
-                except:
+                except Exception:
                     pass
 
         with self.lock:
@@ -577,7 +598,9 @@ class _BlockManager(object):
 
 
 class ArvadosFile(object):
-    """ArvadosFile manages the underlying representation of a file in Keep as a
+    """Represent a file in a Collection.
+
+    ArvadosFile manages the underlying representation of a file in Keep as a
     sequence of segments spanning a set of blocks, and implements random
     read/write access.
 
@@ -585,7 +608,7 @@ class ArvadosFile(object):
 
     """
 
-    def __init__(self, parent, stream=[], segments=[]):
+    def __init__(self, parent, name, stream=[], segments=[]):
         """
         ArvadosFile constructor.
 
@@ -596,6 +619,7 @@ class ArvadosFile(object):
           a list of Range objects representing segments
         """
         self.parent = parent
+        self.name = name
         self._modified = True
         self._segments = []
         self.lock = parent.root_collection().lock
@@ -603,17 +627,17 @@ class ArvadosFile(object):
             self._add_segment(stream, s.locator, s.range_size)
         self._current_bblock = None
 
-    def sync_mode(self):
-        return self.parent.sync_mode()
+    def writable(self):
+        return self.parent.writable()
 
     @synchronized
     def segments(self):
         return copy.copy(self._segments)
 
     @synchronized
-    def clone(self, new_parent):
+    def clone(self, new_parent, new_name):
         """Make a copy of this file."""
-        cp = ArvadosFile(new_parent)
+        cp = ArvadosFile(new_parent, new_name)
         cp.replace_contents(self)
         return cp
 
@@ -700,7 +724,7 @@ class ArvadosFile(object):
                    # segment is past the truncate size, all done
                     break
                 elif size < range_end:
-                    nr = Range(r.locator, r.range_start, size - r.range_start)
+                    nr = Range(r.locator, r.range_start, size - r.range_start, 0)
                     nr.segment_offset = r.segment_offset
                     new_segs.append(nr)
                     break
@@ -710,10 +734,16 @@ class ArvadosFile(object):
             self._segments = new_segs
             self._modified = True
         elif size > self.size():
-            raise IOError("truncate() does not support extending the file size")
+            raise IOError(errno.EINVAL, "truncate() does not support extending the file size")
 
-    def readfrom(self, offset, size, num_retries):
-        """Read upto `size` bytes from the file starting at `offset`."""
+    def readfrom(self, offset, size, num_retries, exact=False):
+        """Read upto `size` bytes from the file starting at `offset`.
+
+        :exact:
+         If False (default), return less data than requested if the read
+         crosses a block boundary and the next block isn't cached.  If True,
+         only return less data than requested when hitting EOF.
+        """
 
         with self.lock:
             if size == 0 or offset >= self.size():
@@ -726,16 +756,15 @@ class ArvadosFile(object):
 
         data = []
         for lr in readsegs:
-            block = self.parent._my_block_manager().get_block_contents(lr.locator, num_retries=num_retries, cache_only=bool(data))
+            block = self.parent._my_block_manager().get_block_contents(lr.locator, num_retries=num_retries, cache_only=(bool(data) and not exact))
             if block:
                 data.append(block[lr.segment_offset:lr.segment_offset+lr.segment_size])
             else:
                 break
         return ''.join(data)
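# Example (sizes hypothetical): a read spanning into an uncached block may
# return short unless exact=True is given.
#
#   af.readfrom(0, 2**26, num_retries=2)              # may stop at a block boundary
#   af.readfrom(0, 2**26, num_retries=2, exact=True)  # short only at end of file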
 
-    def _repack_writes(self):
-        """Test if the buffer block has more data than is referenced by actual
-        segments.
+    def _repack_writes(self, num_retries):
+        """Test if the buffer block has more data than actual segments.
 
         This happens when a buffered write over-writes a file range written in
         a previous buffered write.  Re-pack the buffer block for efficiency
@@ -752,9 +781,10 @@ class ArvadosFile(object):
         if write_total < self._current_bblock.size():
             # There is more data in the buffer block than is actually accounted for by segments, so
             # re-pack into a new buffer by copying over to a new buffer block.
+            contents = self.parent._my_block_manager().get_block_contents(self._current_bblock.blockid, num_retries)
             new_bb = self.parent._my_block_manager().alloc_bufferblock(self._current_bblock.blockid, starting_capacity=write_total, owner=self)
             for t in bufferblock_segs:
-                new_bb.append(self._current_bblock.buffer_view[t.segment_offset:t.segment_offset+t.range_size].tobytes())
+                new_bb.append(contents[t.segment_offset:t.segment_offset+t.range_size])
                 t.segment_offset = new_bb.size() - t.range_size
 
             self._current_bblock = new_bb
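# Worked example (writes hypothetical): two overlapping writes leave dead bytes
# behind in the buffer block.
#
#   af.writeto(0, "x" * 10, 0)   # buffer holds 10 bytes; segments reference 10
#   af.writeto(0, "y" * 10, 0)   # buffer holds 20 bytes; segments still reference 10
#
# _repack_writes() copies only the 10 live bytes into a fresh buffer block.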
@@ -783,20 +813,34 @@ class ArvadosFile(object):
             self._current_bblock = self.parent._my_block_manager().alloc_bufferblock(owner=self)
 
         if (self._current_bblock.size() + len(data)) > config.KEEP_BLOCK_SIZE:
-            self._repack_writes()
+            self._repack_writes(num_retries)
             if (self._current_bblock.size() + len(data)) > config.KEEP_BLOCK_SIZE:
-                self.parent._my_block_manager().commit_bufferblock(self._current_bblock)
+                self.parent._my_block_manager().commit_bufferblock(self._current_bblock, False)
                 self._current_bblock = self.parent._my_block_manager().alloc_bufferblock(owner=self)
 
         self._current_bblock.append(data)
 
         replace_range(self._segments, offset, len(data), self._current_bblock.blockid, self._current_bblock.write_pointer - len(data))
 
+        self.parent.notify(MOD, self.parent, self.name, (self, self))
+
+        return len(data)
+
+    @synchronized
+    def flush(self, wait=True, num_retries=0):
+        if self.modified():
+            if self._current_bblock and self._current_bblock.state() == _BufferBlock.WRITABLE:
+                self._repack_writes(num_retries)
+                self.parent._my_block_manager().commit_bufferblock(self._current_bblock, wait)
+            self.parent.notify(MOD, self.parent, self.name, (self, self))
+
     @must_be_writable
     @synchronized
     def add_segment(self, blocks, pos, size):
-        """Add a segment to the end of the file, with `pos` and `offset` referencing a
-        section of the stream described by `blocks` (a list of Range objects)
+        """Add a segment to the end of the file.
+
+        `pos` and `size` reference a section of the stream described by
+        `blocks` (a list of Range objects)
 
         """
         self._add_segment(blocks, pos, size)
@@ -805,7 +849,7 @@ class ArvadosFile(object):
         """Internal implementation of add_segment."""
         self._modified = True
         for lr in locators_and_ranges(blocks, pos, size):
-            last = self._segments[-1] if self._segments else Range(0, 0, 0)
+            last = self._segments[-1] if self._segments else Range(0, 0, 0, 0)
             r = Range(lr.locator, last.range_start+last.range_size, lr.segment_size, lr.segment_offset)
             self._segments.append(r)
 
@@ -818,25 +862,32 @@ class ArvadosFile(object):
         else:
             return 0
 
-
     @synchronized
     def manifest_text(self, stream_name=".", portable_locators=False, normalize=False):
         buf = ""
-        item = self
         filestream = []
-        for segment in item.segments:
+        for segment in self._segments:
             loc = segment.locator
             if loc.startswith("bufferblock"):
-                loc = item._bufferblocks[loc].calculate_locator()
+                loc = self.parent._my_block_manager().get_bufferblock(loc).locator()
             if portable_locators:
                 loc = KeepLocator(loc).stripped()
             filestream.append(LocatorAndRange(loc, KeepLocator(loc).size,
                                  segment.segment_offset, segment.range_size))
-        stream[stream_name] = filestream
-        buf += ' '.join(normalize_stream(stream_name, stream))
+        buf += ' '.join(normalize_stream(stream_name, {self.name: filestream}))
         buf += "\n"
         return buf
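# Intended output (locator and file name hypothetical): one manifest line per
# call, e.g. for a 10-byte file in the default stream:
#
#   . 781e5e245d69b566979b86e28d23f2c7+10 0:10:file.txt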
 
+    @must_be_writable
+    @synchronized
+    def reparent(self, newparent, newname):
+        self.flush()
+        self.parent.remove(self.name)
+
+        self.parent = newparent
+        self.name = newname
+        self.lock = self.parent.root_collection().lock
+        self._modified = True
 
 class ArvadosFileReader(ArvadosFileReaderBase):
     """Wraps ArvadosFile in a file-like object supporting reading only.
@@ -846,8 +897,8 @@ class ArvadosFileReader(ArvadosFileReaderBase):
 
     """
 
-    def __init__(self, arvadosfile, name, mode="r", num_retries=None):
-        super(ArvadosFileReader, self).__init__(name, mode, num_retries=num_retries)
+    def __init__(self, arvadosfile, mode="r", num_retries=None):
+        super(ArvadosFileReader, self).__init__(arvadosfile.name, mode, num_retries=num_retries)
         self.arvadosfile = arvadosfile
 
     def size(self):
@@ -858,16 +909,32 @@ class ArvadosFileReader(ArvadosFileReaderBase):
 
     @_FileLikeObjectBase._before_close
     @retry_method
-    def read(self, size, num_retries=None):
-        """Read up to `size` bytes from the stream, starting at the current file position."""
-        data = self.arvadosfile.readfrom(self._filepos, size, num_retries)
-        self._filepos += len(data)
-        return data
+    def read(self, size=None, num_retries=None):
+        """Read up to `size` bytes from the file and return the result.
+
+        Starts at the current file position.  If `size` is None, read the
+        entire remainder of the file.
+        """
+        if size is None:
+            data = []
+            rd = self.arvadosfile.readfrom(self._filepos, config.KEEP_BLOCK_SIZE, num_retries)
+            while rd:
+                data.append(rd)
+                self._filepos += len(rd)
+                rd = self.arvadosfile.readfrom(self._filepos, config.KEEP_BLOCK_SIZE, num_retries)
+            return ''.join(data)
+        else:
+            data = self.arvadosfile.readfrom(self._filepos, size, num_retries)
+            self._filepos += len(data)
+            return data
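# Usage sketch: with the new signature the name is taken from the ArvadosFile.
#
#   reader = ArvadosFileReader(af)   # af is an ArvadosFile; name comes from af.name
#   head = reader.read(1024)         # at most 1024 bytes
#   rest = reader.read()             # size=None: loops in KEEP_BLOCK_SIZE chunks to EOF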
 
     @_FileLikeObjectBase._before_close
     @retry_method
     def readfrom(self, offset, size, num_retries=None):
-        """Read up to `size` bytes from the stream, starting at the current file position."""
+        """Read up to `size` bytes from the stream, starting at the specified file offset.
+
+        This method does not change the file position.
+        """
         return self.arvadosfile.readfrom(offset, size, num_retries)
 
     def flush(self):
@@ -882,8 +949,8 @@ class ArvadosFileWriter(ArvadosFileReader):
 
     """
 
-    def __init__(self, arvadosfile, name, mode, num_retries=None):
-        super(ArvadosFileWriter, self).__init__(arvadosfile, name, mode, num_retries=num_retries)
+    def __init__(self, arvadosfile, mode, num_retries=None):
+        super(ArvadosFileWriter, self).__init__(arvadosfile, mode, num_retries=num_retries)
 
     @_FileLikeObjectBase._before_close
     @retry_method
@@ -893,6 +960,7 @@ class ArvadosFileWriter(ArvadosFileReader):
         else:
             self.arvadosfile.writeto(self._filepos, data, num_retries)
             self._filepos += len(data)
+        return len(data)
 
     @_FileLikeObjectBase._before_close
     @retry_method
@@ -900,6 +968,7 @@ class ArvadosFileWriter(ArvadosFileReader):
         for s in seq:
             self.write(s, num_retries)
 
+    @_FileLikeObjectBase._before_close
     def truncate(self, size=None):
         if size is None:
             size = self._filepos
@@ -907,6 +976,11 @@ class ArvadosFileWriter(ArvadosFileReader):
         if self._filepos > self.size():
             self._filepos = self.size()
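# Example: with no argument, truncate() cuts at the current file position;
# extending a file this way is not supported.
#
#   writer.seek(50)
#   writer.truncate()      # file is now 50 bytes long
#   writer.truncate(100)   # raises IOError(errno.EINVAL) if the file is shorter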
 
+    @_FileLikeObjectBase._before_close
+    def flush(self):
+        self.arvadosfile.flush()
+
     def close(self):
-        if self.arvadosfile.parent.sync_mode() == SYNC_LIVE:
-            self.arvadosfile.parent.root_collection().save()
+        if not self.closed:
+            self.flush()
+            super(ArvadosFileWriter, self).close()
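# Usage sketch (mode hypothetical): close() now flushes before closing, and a
# second close() is a no-op.
#
#   writer = ArvadosFileWriter(af, "r+", num_retries=2)
#   n = writer.write("hello")   # returns len("hello")
#   writer.flush()              # commits the current buffer block to Keep
#   writer.close()              # flushes once more, then marks the file closed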