11308: Avoid Python2-inefficient list() operations.
diff --git a/sdk/python/arvados/arvfile.py b/sdk/python/arvados/arvfile.py
index c394dab810715c2659b6f72f8f5f1e173d711ead..c52c7727aef048cfb1d3af21e18589f590a9ec26 100644
@@ -1,18 +1,26 @@
-import functools
-import os
-import zlib
+from __future__ import absolute_import
+from __future__ import division
+from future import standard_library
+from future.utils import listitems, listvalues
+standard_library.install_aliases()
+from builtins import range
+from builtins import object
 import bz2
-import config
-import hashlib
-import threading
-import Queue
+import collections
 import copy
 import errno
-import re
+import functools
+import hashlib
 import logging
-import collections
+import os
+import queue
+import re
+import sys
+import threading
 import uuid
+import zlib
 
+from . import config
 from .errors import KeepWriteError, AssertionError, ArgumentError
 from .keep import KeepLocator
 from ._normalize_stream import normalize_stream
@@ -38,6 +46,12 @@ def split(path):
         stream_name, file_name = '.', path
     return stream_name, file_name
 
+
+class UnownedBlockError(Exception):
+    """Raised when there's an writable block without an owner on the BlockManager."""
+    pass
+
+
 class _FileLikeObjectBase(object):
     def __init__(self, name, mode):
         self.name = name
@@ -70,7 +84,10 @@ class _FileLikeObjectBase(object):
 class ArvadosFileReaderBase(_FileLikeObjectBase):
     def __init__(self, name, mode, num_retries=None):
         super(ArvadosFileReaderBase, self).__init__(name, mode)
-        self._filepos = 0L
+        self._binary = 'b' in mode
+        if sys.version_info >= (3, 0) and not self._binary:
+            raise NotImplementedError("text mode {!r} is not implemented".format(mode))
+        self._filepos = 0
         self.num_retries = num_retries
         self._readline_cache = (None, None)
 
@@ -90,7 +107,7 @@ class ArvadosFileReaderBase(_FileLikeObjectBase):
             pos += self._filepos
         elif whence == os.SEEK_END:
             pos += self.size()
-        self._filepos = min(max(pos, 0L), self.size())
+        self._filepos = min(max(pos, 0), self.size())
 
     def tell(self):
         return self._filepos
@@ -100,7 +117,7 @@ class ArvadosFileReaderBase(_FileLikeObjectBase):
     def readall(self, size=2**20, num_retries=None):
         while True:
             data = self.read(size, num_retries=num_retries)
-            if data == '':
+            if len(data) == 0:
                 break
             yield data
 
@@ -112,23 +129,23 @@ class ArvadosFileReaderBase(_FileLikeObjectBase):
             data = [cache_data]
             self._filepos += len(cache_data)
         else:
-            data = ['']
+            data = [b'']
         data_size = len(data[-1])
-        while (data_size < size) and ('\n' not in data[-1]):
+        while (data_size < size) and (b'\n' not in data[-1]):
             next_read = self.read(2 ** 20, num_retries=num_retries)
             if not next_read:
                 break
             data.append(next_read)
             data_size += len(next_read)
-        data = ''.join(data)
+        data = b''.join(data)
         try:
-            nextline_index = data.index('\n') + 1
+            nextline_index = data.index(b'\n') + 1
         except ValueError:
             nextline_index = len(data)
         nextline_index = min(nextline_index, size)
         self._filepos -= len(data) - nextline_index
         self._readline_cache = (self.tell(), data[nextline_index:])
-        return data[:nextline_index]
+        return data[:nextline_index].decode()
 
     @_FileLikeObjectBase._before_close
     @retry_method
@@ -163,7 +180,7 @@ class ArvadosFileReaderBase(_FileLikeObjectBase):
             data_size += len(s)
             if data_size >= sizehint:
                 break
-        return ''.join(data).splitlines(True)
+        return b''.join(data).decode().splitlines(True)
 
     def size(self):
         raise NotImplementedError()
@@ -200,15 +217,15 @@ class StreamFileReader(ArvadosFileReaderBase):
     def read(self, size, num_retries=None):
         """Read up to 'size' bytes from the stream, starting at the current file position"""
         if size == 0:
-            return ''
+            return b''
 
-        data = ''
+        data = b''
         available_chunks = locators_and_ranges(self.segments, self._filepos, size)
         if available_chunks:
             lr = available_chunks[0]
             data = self._stream.readfrom(lr.locator+lr.segment_offset,
-                                          lr.segment_size,
-                                          num_retries=num_retries)
+                                         lr.segment_size,
+                                         num_retries=num_retries)
 
         self._filepos += len(data)
         return data
@@ -218,13 +235,13 @@ class StreamFileReader(ArvadosFileReaderBase):
     def readfrom(self, start, size, num_retries=None):
         """Read up to 'size' bytes from the stream, starting at 'start'"""
         if size == 0:
-            return ''
+            return b''
 
         data = []
         for lr in locators_and_ranges(self.segments, start, size):
             data.append(self._stream.readfrom(lr.locator+lr.segment_offset, lr.segment_size,
                                               num_retries=num_retries))
-        return ''.join(data)
+        return b''.join(data)
 
     def as_manifest(self):
         segs = []
@@ -304,6 +321,8 @@ class _BufferBlock(object):
 
         """
         if self._state == _BufferBlock.WRITABLE:
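+            # Callers on Python 3 may pass str; the underlying buffer stores
+            # bytes, so text is encoded (UTF-8 by default) before appending.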
+            if not isinstance(data, bytes) and not isinstance(data, memoryview):
+                data = data.encode()
             while (self.write_pointer+len(data)) > len(self.buffer_block):
                 new_buffer_block = bytearray(len(self.buffer_block) * 2)
                 new_buffer_block[0:self.write_pointer] = self.buffer_block[0:self.write_pointer]
@@ -404,7 +423,7 @@ class _BlockManager(object):
     DEFAULT_PUT_THREADS = 2
     DEFAULT_GET_THREADS = 2
 
-    def __init__(self, keep, copies=None):
+    def __init__(self, keep, copies=None, put_threads=None):
         """keep: KeepClient object to use"""
         self._keep = keep
         self._bufferblocks = collections.OrderedDict()
@@ -414,9 +433,14 @@ class _BlockManager(object):
         self._prefetch_threads = None
         self.lock = threading.Lock()
         self.prefetch_enabled = True
-        self.num_put_threads = _BlockManager.DEFAULT_PUT_THREADS
+        if put_threads:
+            self.num_put_threads = put_threads
+        else:
+            self.num_put_threads = _BlockManager.DEFAULT_PUT_THREADS
         self.num_get_threads = _BlockManager.DEFAULT_GET_THREADS
         self.copies = copies
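+        # _pending_write_size tracks bytes in closed small files that are
+        # waiting to be repacked into a full block; threads_lock guards lazy
+        # startup of the uploader threads without taking self.lock.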
+        self._pending_write_size = 0
+        self.threads_lock = threading.Lock()
 
     @synchronized
     def alloc_bufferblock(self, blockid=None, starting_capacity=2**14, owner=None):
@@ -482,28 +506,28 @@ class _BlockManager(object):
                 if self._put_queue is not None:
                     self._put_queue.task_done()
 
-    @synchronized
     def start_put_threads(self):
-        if self._put_threads is None:
-            # Start uploader threads.
-
-            # If we don't limit the Queue size, the upload queue can quickly
-            # grow to take up gigabytes of RAM if the writing process is
-            # generating data more quickly than it can be send to the Keep
-            # servers.
-            #
-            # With two upload threads and a queue size of 2, this means up to 4
-            # blocks pending.  If they are full 64 MiB blocks, that means up to
-            # 256 MiB of internal buffering, which is the same size as the
-            # default download block cache in KeepClient.
-            self._put_queue = Queue.Queue(maxsize=2)
-
-            self._put_threads = []
-            for i in xrange(0, self.num_put_threads):
-                thread = threading.Thread(target=self._commit_bufferblock_worker)
-                self._put_threads.append(thread)
-                thread.daemon = True
-                thread.start()
+        with self.threads_lock:
+            if self._put_threads is None:
+                # Start uploader threads.
+
+                # If we don't limit the Queue size, the upload queue can quickly
+                # grow to take up gigabytes of RAM if the writing process is
+                # generating data more quickly than it can be sent to the Keep
+                # servers.
+                #
+                # With two upload threads and a queue size of 2, this means up to 4
+                # blocks pending.  If they are full 64 MiB blocks, that means up to
+                # 256 MiB of internal buffering, which is the same size as the
+                # default download block cache in KeepClient.
+                self._put_queue = queue.Queue(maxsize=2)
+
+                self._put_threads = []
+                for i in range(0, self.num_put_threads):
+                    thread = threading.Thread(target=self._commit_bufferblock_worker)
+                    self._put_threads.append(thread)
+                    thread.daemon = True
+                    thread.start()
 
     def _block_prefetch_worker(self):
         """The background downloader thread."""
@@ -514,14 +538,14 @@ class _BlockManager(object):
                     return
                 self._keep.get(b)
             except Exception:
-                pass
+                _logger.exception("Exception doing block prefetch")
 
     @synchronized
     def start_get_threads(self):
         if self._prefetch_threads is None:
-            self._prefetch_queue = Queue.Queue()
+            self._prefetch_queue = queue.Queue()
             self._prefetch_threads = []
-            for i in xrange(0, self.num_get_threads):
+            for i in range(0, self.num_get_threads):
                 thread = threading.Thread(target=self._block_prefetch_worker)
                 self._prefetch_threads.append(thread)
                 thread.daemon = True
@@ -555,24 +579,38 @@ class _BlockManager(object):
         self.stop_threads()
 
     @synchronized
-    def repack_small_blocks(self, force=False, sync=False):
+    def repack_small_blocks(self, force=False, sync=False, closed_file_size=0):
         """Packs small blocks together before uploading"""
-        # Search blocks ready for getting packed together before being committed to Keep.
-        # A WRITABLE block always has an owner.
-        # A WRITABLE block with its owner.closed() implies that it's
-        # size is <= KEEP_BLOCK_SIZE/2.
-        small_blocks = [b for b in self._bufferblocks.values() if b.state() == _BufferBlock.WRITABLE and b.owner.closed()]
-        if len(small_blocks) <= 1:
-            # Not enough small blocks for repacking
-            return
+        self._pending_write_size += closed_file_size
 
         # Check if there are enough small blocks for filling up one in full
-        pending_write_size = sum([b.size() for b in small_blocks])
-        if force or (pending_write_size >= config.KEEP_BLOCK_SIZE):
+        if force or (self._pending_write_size >= config.KEEP_BLOCK_SIZE):
+
+            # Search for blocks ready to be packed together before being committed to Keep.
+            # A WRITABLE block always has an owner.
+            # A WRITABLE block with its owner.closed() implies that its
+            # size is <= KEEP_BLOCK_SIZE/2.
+            try:
+                small_blocks = [b for b in listvalues(self._bufferblocks) if b.state() == _BufferBlock.WRITABLE and b.owner.closed()]
+            except AttributeError:
+                # Writable blocks without an owner shouldn't exist.
+                raise UnownedBlockError()
+
+            if len(small_blocks) <= 1:
+                # Not enough small blocks for repacking
+                return
+
+            # Update the pending write size count with its true value, just in case
+            # some small file was opened, written and closed several times.
+            self._pending_write_size = sum([b.size() for b in small_blocks])
+            if self._pending_write_size < config.KEEP_BLOCK_SIZE and not force:
+                return
+
             new_bb = self._alloc_bufferblock()
             while len(small_blocks) > 0 and (new_bb.write_pointer + small_blocks[0].size()) <= config.KEEP_BLOCK_SIZE:
                 bb = small_blocks.pop(0)
                 arvfile = bb.owner
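+                # Deduct this block's bytes from the pending-repack total as
+                # it gets packed into new_bb.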
+                self._pending_write_size -= bb.size()
                 new_bb.append(bb.buffer_view[0:bb.write_pointer].tobytes())
                 arvfile.set_segments([Range(new_bb.blockid,
                                             0,
@@ -666,7 +704,7 @@ class _BlockManager(object):
         self.repack_small_blocks(force=True, sync=True)
 
         with self.lock:
-            items = self._bufferblocks.items()
+            items = listitems(self._bufferblocks)
 
         for k,v in items:
             if v.state() != _BufferBlock.COMMITTED and v.owner:
@@ -747,6 +785,14 @@ class ArvadosFile(object):
     def writable(self):
         return self.parent.writable()
 
+    @synchronized
+    def permission_expired(self, as_of_dt=None):
+        """Returns True if any of the segment's locators is expired"""
+        for r in self._segments:
+            if KeepLocator(r.locator).permission_expired(as_of_dt):
+                return True
+        return False
+
     @synchronized
     def segments(self):
         return copy.copy(self._segments)
@@ -778,7 +824,7 @@ class ArvadosFile(object):
 
             self._segments.append(Range(new_loc, other_segment.range_start, other_segment.range_size, other_segment.segment_offset))
 
-        self._committed = False
+        self.set_committed(False)
 
     def __eq__(self, other):
         if other is self:
@@ -790,7 +836,7 @@ class ArvadosFile(object):
         with self.lock:
             if len(self._segments) != len(othersegs):
                 return False
-            for i in xrange(0, len(othersegs)):
+            for i in range(0, len(othersegs)):
                 seg1 = self._segments[i]
                 seg2 = othersegs[i]
                 loc1 = seg1.locator
@@ -818,9 +864,18 @@ class ArvadosFile(object):
         self._segments = segs
 
     @synchronized
-    def set_committed(self):
-        """Set committed flag to True"""
-        self._committed = True
+    def set_committed(self, value=True):
+        """Set committed flag.
+
+        If value is True, set committed to be True.
+
+        If value is False, set committed to be False for this and all parents.
+        """
+        if value == self._committed:
+            return
+        self._committed = value
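+        # Marking a file as uncommitted bubbles up to the root, so enclosing
+        # collections know they have uncommitted changes.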
+        if self._committed is False and self.parent is not None:
+            self.parent.set_committed(False)
 
     @synchronized
     def committed(self):
@@ -841,12 +896,12 @@ class ArvadosFile(object):
         """
         self._writers.remove(writer)
 
-        if flush or self.size() > config.KEEP_BLOCK_SIZE / 2:
+        if flush or self.size() > config.KEEP_BLOCK_SIZE // 2:
             # File writer closed, not small enough for repacking
             self.flush()
         elif self.closed():
             # All writers closed and size is adequate for repacking
-            self.parent._my_block_manager().repack_small_blocks()
+            self.parent._my_block_manager().repack_small_blocks(closed_file_size=self.size())
 
     def closed(self):
         """
@@ -881,7 +936,7 @@ class ArvadosFile(object):
                     new_segs.append(r)
 
             self._segments = new_segs
-            self._committed = False
+            self.set_committed(False)
         elif size > self.size():
             raise IOError(errno.EINVAL, "truncate() does not support extending the file size")
 
@@ -896,7 +951,7 @@ class ArvadosFile(object):
 
         with self.lock:
             if size == 0 or offset >= self.size():
-                return ''
+                return b''
             readsegs = locators_and_ranges(self._segments, offset, size)
             prefetch = locators_and_ranges(self._segments, offset + size, config.KEEP_BLOCK_SIZE, limit=32)
 
@@ -916,7 +971,7 @@ class ArvadosFile(object):
                 self.parent._my_block_manager().block_prefetch(lr.locator)
                 locs.add(lr.locator)
 
-        return ''.join(data)
+        return b''.join(data)
 
     def _repack_writes(self, num_retries):
         """Test if the buffer block has more data than actual segments.
@@ -953,6 +1008,8 @@ class ArvadosFile(object):
         necessary.
 
         """
+        if not isinstance(data, bytes) and not isinstance(data, memoryview):
+            data = data.encode()
         if len(data) == 0:
             return
 
@@ -968,7 +1025,7 @@ class ArvadosFile(object):
                 n += config.KEEP_BLOCK_SIZE
             return
 
-        self._committed = False
+        self.set_committed(False)
 
         if self._current_bblock is None or self._current_bblock.state() != _BufferBlock.WRITABLE:
             self._current_bblock = self.parent._my_block_manager().alloc_bufferblock(owner=self)
@@ -1031,7 +1088,7 @@ class ArvadosFile(object):
 
     def _add_segment(self, blocks, pos, size):
         """Internal implementation of add_segment."""
-        self._committed = False
+        self.set_committed(False)
         for lr in locators_and_ranges(blocks, pos, size):
             last = self._segments[-1] if self._segments else Range(0, 0, 0, 0)
             r = Range(lr.locator, last.range_start+last.range_size, lr.segment_size, lr.segment_offset)
@@ -1047,12 +1104,15 @@ class ArvadosFile(object):
             return 0
 
     @synchronized
-    def manifest_text(self, stream_name=".", portable_locators=False, normalize=False):
+    def manifest_text(self, stream_name=".", portable_locators=False,
+                      normalize=False, only_committed=False):
         buf = ""
         filestream = []
         for segment in self._segments:
             loc = segment.locator
-            if loc.startswith("bufferblock"):
+            if self.parent._my_block_manager().is_bufferblock(loc):
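+                # Data still in a bufferblock has no permanent locator yet;
+                # with only_committed, leave it out of the manifest.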
+                if only_committed:
+                    continue
-                loc = self._bufferblocks[loc].calculate_locator()
+                loc = self.parent._my_block_manager().get_bufferblock(loc).locator()
             if portable_locators:
                 loc = KeepLocator(loc).stripped()
@@ -1065,7 +1125,7 @@ class ArvadosFile(object):
     @must_be_writable
     @synchronized
     def _reparent(self, newparent, newname):
-        self._committed = False
+        self.set_committed(False)
         self.flush(sync=True)
         self.parent.remove(self.name)
         self.parent = newparent
@@ -1081,8 +1141,8 @@ class ArvadosFileReader(ArvadosFileReaderBase):
 
     """
 
-    def __init__(self, arvadosfile, num_retries=None):
-        super(ArvadosFileReader, self).__init__(arvadosfile.name, "r", num_retries=num_retries)
+    def __init__(self, arvadosfile, mode="r", num_retries=None):
+        super(ArvadosFileReader, self).__init__(arvadosfile.name, mode=mode, num_retries=num_retries)
         self.arvadosfile = arvadosfile
 
     def size(self):
@@ -1106,7 +1166,7 @@ class ArvadosFileReader(ArvadosFileReaderBase):
                 data.append(rd)
                 self._filepos += len(rd)
                 rd = self.arvadosfile.readfrom(self._filepos, config.KEEP_BLOCK_SIZE, num_retries)
-            return ''.join(data)
+            return b''.join(data)
         else:
             data = self.arvadosfile.readfrom(self._filepos, size, num_retries, exact=True)
             self._filepos += len(data)
@@ -1134,8 +1194,7 @@ class ArvadosFileWriter(ArvadosFileReader):
     """
 
     def __init__(self, arvadosfile, mode, num_retries=None):
-        super(ArvadosFileWriter, self).__init__(arvadosfile, num_retries=num_retries)
-        self.mode = mode
+        super(ArvadosFileWriter, self).__init__(arvadosfile, mode=mode, num_retries=num_retries)
         self.arvadosfile.add_writer(self)
 
     @_FileLikeObjectBase._before_close