4823: Working on method documentation and comments for arvfile
[arvados.git] / sdk / python / arvados / arvfile.py
index 836927257d0e7a5e1c25911ba49f98aea8ac121e..2ec1079af12821bac91d634b749f4ce14da22e61 100644 (file)
@@ -208,19 +208,42 @@ class StreamFileReader(ArvadosFileReaderBase):
 
 
 class BufferBlock(object):
+    '''
+    A BufferBlock is a stand-in for a Keep block that is in the process of being
+    written.  Writers can append to it, get its size, and compute its Keep locator.
+
+    There are three valid states:
+
+    WRITABLE - can append
+
+    PENDING - in the process of being uploaded to Keep; appending is an error
+
+    COMMITTED - the block has been written to Keep, its internal buffer has been
+    released, and the BufferBlock should be discarded in favor of fetching the
+    block through the normal Keep lookup mechanism.
+    '''
     WRITABLE = 0
     PENDING = 1
     COMMITTED = 2
 
-    def __init__(self, blockid, starting_size):
+    def __init__(self, blockid, starting_capacity):
+        '''
+        blockid: the identifier for this block
+        starting_capacity: the initial buffer capacity
+        '''
         self.blockid = blockid
-        self.buffer_block = bytearray(starting_size)
+        self.buffer_block = bytearray(starting_capacity)
         self.buffer_view = memoryview(self.buffer_block)
         self.write_pointer = 0
         self.state = BufferBlock.WRITABLE
         self._locator = None
 
     def append(self, data):
+        '''
+        Append some data to the buffer.  Only valid if the block is in the
+        WRITABLE state.  Implements an expanding buffer, doubling its capacity
+        as needed to accommodate all the data.
+        '''
         if self.state == BufferBlock.WRITABLE:
             while (self.write_pointer+len(data)) > len(self.buffer_block):
                 new_buffer_block = bytearray(len(self.buffer_block) * 2)
@@ -234,19 +257,33 @@ class BufferBlock(object):
             raise AssertionError("Buffer block is not writable")
 
     def size(self):
+        '''Amount of data written to the buffer'''
         return self.write_pointer
 
     def locator(self):
+        '''The Keep locator for this buffer's contents.'''
         if self._locator is None:
             self._locator = "%s+%i" % (hashlib.md5(self.buffer_view[0:self.write_pointer]).hexdigest(), self.size())
         return self._locator
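
As a rough illustration of the life cycle documented above (an editorial sketch
only; BufferBlocks are normally allocated and committed by a BlockManager rather
than used directly, and the import assumes this module is available as
arvados.arvfile):

    from arvados.arvfile import BufferBlock

    bb = BufferBlock("bufferblock0", starting_capacity=2**14)
    bb.append("hello ")
    bb.append("world")                  # the buffer doubles as needed to fit the data
    assert bb.state == BufferBlock.WRITABLE
    assert bb.size() == 11
    print(bb.locator())                 # md5 of the 11 buffered bytes plus "+11"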
 
 class AsyncKeepWriteErrors(Exception):
+    '''
+    Roll up one or more Keep write exceptions (generated by background
+    threads) into a single one.
+    '''
     def __init__(self, errors):
         self.errors = errors
 
+    def __repr__(self):
+        return "\n".join(self.errors)
+
 class BlockManager(object):
+    '''
+    BlockManager handles buffer blocks, background block uploads, and
+    background block prefetch for a Collection of ArvadosFiles.
+    '''
     def __init__(self, keep):
+        '''keep: KeepClient object to use'''
         self._keep = keep
         self._bufferblocks = {}
         self._put_queue = None
@@ -255,14 +292,22 @@ class BlockManager(object):
         self._prefetch_queue = None
         self._prefetch_threads = None
 
-    def alloc_bufferblock(self, blockid=None, starting_size=2**14):
+    def alloc_bufferblock(self, blockid=None, starting_capacity=2**14):
+        '''
+        Allocate a new, empty bufferblock in WRITABLE state and return it.
+        blockid: optional block identifier, otherwise one will be automatically assigned
+        starting_capacity: optional capacity, otherwise will use default capacity
+        '''
         if blockid is None:
             blockid = "bufferblock%i" % len(self._bufferblocks)
-        bb = BufferBlock(blockid, starting_size=starting_size)
+        bb = BufferBlock(blockid, starting_capacity=starting_capacity)
         self._bufferblocks[bb.blockid] = bb
         return bb
 
     def stop_threads(self):
+        '''
+        Shut down and wait for background upload and download threads to finish.
+        '''
         if self._put_threads is not None:
             for t in self._put_threads:
                 self._put_queue.put(None)
@@ -281,7 +326,15 @@ class BlockManager(object):
         self._prefetch_queue = None
 
     def commit_bufferblock(self, block):
+        '''
+        Initiate a background upload of a bufferblock.  This will block if the
+        upload queue is at capacity; otherwise it returns immediately.
+        '''
+
         def worker(self):
+            '''
+            Background uploader thread.
+            '''
             while True:
                 try:
                     b = self._put_queue.get()
@@ -299,26 +352,49 @@ class BlockManager(object):
                         self._put_queue.task_done()
 
         if self._put_threads is None:
-            self._put_queue = Queue.Queue()
+            # Start uploader threads.
+
+            # If we don't limit the Queue size, the upload queue can quickly
+            # grow to take up gigabytes of RAM if the writing process is
+            # generating data more quickly than it can be sent to the Keep
+            # servers.
+            #
+            # With two upload threads and a queue size of 2, this means up to 4
+            # blocks pending.  If they are full 64 MiB blocks, that means up to
+            # 256 MiB of internal buffering, which is the same size as the
+            # default download block cache in KeepClient.
+            self._put_queue = Queue.Queue(maxsize=2)
             self._put_errors = Queue.Queue()
             self._put_threads = [threading.Thread(target=worker, args=(self,)),
-                                threading.Thread(target=worker, args=(self,))]
-            self._put_threads[0].start()
-            self._put_threads[1].start()
+                                 threading.Thread(target=worker, args=(self,))]
+            for t in self._put_threads:
+                t.daemon = True
+                t.start()
 
+        # Mark the block as PENDING so as to disallow any further appends.
         block.state = BufferBlock.PENDING
         self._put_queue.put(block)
 
-    def get_block(self, locator, num_retries):
+    def get_block(self, locator, num_retries, cache_only=False):
+        '''
+        Fetch a block.  First checks to see if the locator is a BufferBlock; if so,
+        returns its contents.  Otherwise passes the request through to KeepClient.get().
+        '''
         if locator in self._bufferblocks:
             bb = self._bufferblocks[locator]
             if bb.state != BufferBlock.COMMITTED:
                 return bb.buffer_view[0:bb.write_pointer].tobytes()
             else:
                 locator = bb._locator
-        return self._keep.get(locator, num_retries=num_retries)
+        return self._keep.get(locator, num_retries=num_retries, cache_only=cache_only)
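
A sketch of how get_block() is expected to behave (hypothetical usage; the
cache_only keyword is assumed to make KeepClient.get() return None rather than
fetch when the block is not already in its local cache, and the locator below
is just the well-known empty-block locator used as an example):

    import arvados
    from arvados.arvfile import BlockManager

    bm = BlockManager(arvados.KeepClient())
    bb = bm.alloc_bufferblock()
    bb.append("abc")
    assert bm.get_block(bb.blockid, num_retries=2) == "abc"     # served from the buffer
    empty = "d41d8cd98f00b204e9800998ecf8427e+0"
    print(bm.get_block(empty, num_retries=2, cache_only=True))  # None unless already cached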
 
     def commit_all(self):
+        '''
+        Commit all outstanding buffer blocks.  Unlike commit_bufferblock(), this
+        is a synchronous call, and will not return until all buffer blocks are
+        uploaded.  Raises AsyncKeepWriteErrors() if any blocks failed to
+        upload.
+        '''
         for k,v in self._bufferblocks.items():
             if v.state == BufferBlock.WRITABLE:
                 self.commit_bufferblock(v)
@@ -334,7 +410,14 @@ class BlockManager(object):
                 raise AsyncKeepWriteErrors(e)
 
     def block_prefetch(self, locator):
+        '''
+        Initiate a background download of a block.  This assumes that the
+        underlying KeepClient implements a block cache, so repeated requests
+        for the same block will not result in repeated downloads (unless the
+        block is evicted from the cache.)  This method does not block.
+        '''
         def worker(self):
+            '''Background downloader thread.'''
             while True:
                 try:
                     b = self._prefetch_queue.get()
@@ -348,23 +431,43 @@ class BlockManager(object):
             return
         if self._prefetch_threads is None:
             self._prefetch_queue = Queue.Queue()
-            self._prefetch_threads = [threading.Thread(target=worker, args=(self,))]
-            self._prefetch_threads[0].start()
+            self._prefetch_threads = [threading.Thread(target=worker, args=(self,)),
+                                      threading.Thread(target=worker, args=(self,))]
+            for t in self._prefetch_threads:
+                t.daemon = True
+                t.start()
         self._prefetch_queue.put(locator)
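
Putting the pieces together, a hedged end-to-end sketch of the write and
prefetch paths (assumes a reachable Keep service and an API token in the
environment; not taken from this commit):

    import arvados
    from arvados.arvfile import BlockManager, AsyncKeepWriteErrors

    bm = BlockManager(arvados.KeepClient())
    try:
        bb = bm.alloc_bufferblock()     # new WRITABLE buffer block
        bb.append("some file data")
        loc = bb.locator()              # locator of the buffered contents
        bm.commit_bufferblock(bb)       # queue a background upload; block becomes PENDING
        bm.commit_all()                 # synchronous: wait for every pending upload
        bm.block_prefetch(loc)          # warm the KeepClient cache for a later read
    except AsyncKeepWriteErrors as e:
        print("one or more blocks failed to upload: %s" % repr(e))
    finally:
        bm.stop_threads()               # shut down uploader and prefetch threads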
 
 class ArvadosFile(object):
-    def __init__(self, parent, stream=[], segments=[], keep=None):
+    '''
+    Manages the underlying representation of a file in Keep as a sequence of
+    segments over a set of blocks, supporting random read/write access.
+    '''
+
+    def __init__(self, parent, stream=[], segments=[]):
         '''
         stream: a list of Range objects representing a block stream
         segments: a list of Range objects representing segments
         '''
         self.parent = parent
         self._modified = True
-        self._segments = []
+        self.segments = []
         for s in segments:
-            self.add_segment(stream, s.range_start, s.range_size)
+            self.add_segment(stream, s.locator, s.range_size)
         self._current_bblock = None
-        self._keep = keep
+        self.lock = threading.Lock()
+
+    def clone(self):
+        '''
+        Make a copy of this file.
+        '''
+        # TODO: copy bufferblocks.
+        with self.lock:
+            cp = ArvadosFile(self.parent)
+            cp.parent = self.parent
+            cp._modified = False
+            cp.segments = [Range(r.locator, r.range_start, r.range_size, r.segment_offset) for r in self.segments]
+            return cp
 
     def set_unmodified(self):
         self._modified = False
@@ -374,7 +477,7 @@ class ArvadosFile(object):
 
     def truncate(self, size):
         new_segs = []
-        for r in self._segments:
+        for r in self.segments:
             range_end = r.range_start+r.range_size
             if r.range_start >= size:
                 # segment is past the truncate size, all done
@@ -387,23 +490,23 @@ class ArvadosFile(object):
             else:
                 new_segs.append(r)
 
-        self._segments = new_segs
+        self.segments = new_segs
         self._modified = True
 
     def readfrom(self, offset, size, num_retries):
         if size == 0 or offset >= self.size():
             return ''
-        if self._keep is None:
-            self._keep = KeepClient(num_retries=num_retries)
         data = []
 
-        for lr in locators_and_ranges(self._segments, offset, size + config.KEEP_BLOCK_SIZE):
+        for lr in locators_and_ranges(self.segments, offset, size + config.KEEP_BLOCK_SIZE):
             self.parent._my_block_manager().block_prefetch(lr.locator)
 
-        for lr in locators_and_ranges(self._segments, offset, size):
-            # TODO: if data is empty, wait on block get, otherwise only
-            # get more data if the block is already in the cache.
-            data.append(self.parent._my_block_manager().get_block(lr.locator, num_retries=num_retries)[lr.segment_offset:lr.segment_offset+lr.segment_size])
+        for lr in locators_and_ranges(self.segments, offset, size):
+            d = self.parent._my_block_manager().get_block(lr.locator, num_retries=num_retries, cache_only=bool(data))
+            if d:
+                data.append(d[lr.segment_offset:lr.segment_offset+lr.segment_size])
+            else:
+                break
         return ''.join(data)
 
     def _repack_writes(self):
@@ -412,7 +515,7 @@ class ArvadosFile(object):
         a previous buffered write).  Re-pack the buffer block for efficiency
         and to avoid leaking information.
         '''
-        segs = self._segments
+        segs = self.segments
 
         # Sum up the segments to get the total bytes of the file referencing
         # into the buffer block.
@@ -451,18 +554,18 @@ class ArvadosFile(object):
                 self._current_bblock = self.parent._my_block_manager().alloc_bufferblock()
 
         self._current_bblock.append(data)
-        replace_range(self._segments, offset, len(data), self._current_bblock.blockid, self._current_bblock.write_pointer - len(data))
+        replace_range(self.segments, offset, len(data), self._current_bblock.blockid, self._current_bblock.write_pointer - len(data))
 
     def add_segment(self, blocks, pos, size):
         self._modified = True
         for lr in locators_and_ranges(blocks, pos, size):
-            last = self._segments[-1] if self._segments else Range(0, 0, 0)
+            last = self.segments[-1] if self.segments else Range(0, 0, 0)
             r = Range(lr.locator, last.range_start+last.range_size, lr.segment_size, lr.segment_offset)
-            self._segments.append(r)
+            self.segments.append(r)
 
     def size(self):
-        if self._segments:
-            n = self._segments[-1]
+        if self.segments:
+            n = self.segments[-1]
             return n.range_start + n.range_size
         else:
             return 0
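
The only structural assumption ArvadosFile makes about its parent is that it
exposes _my_block_manager() returning a shared BlockManager.  A minimal sketch
with a hypothetical StubParent (not part of the SDK), showing an empty file:

    import arvados
    from arvados.arvfile import ArvadosFile, BlockManager

    class StubParent(object):
        def __init__(self, keep_client):
            self._block_manager = BlockManager(keep_client)

        def _my_block_manager(self):
            return self._block_manager

    parent = StubParent(arvados.KeepClient())
    f = ArvadosFile(parent)                  # no stream, no segments
    assert f.size() == 0
    assert f.readfrom(0, 10, num_retries=2) == ''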
@@ -471,7 +574,7 @@ class ArvadosFile(object):
 class ArvadosFileReader(ArvadosFileReaderBase):
     def __init__(self, arvadosfile, name, mode="r", num_retries=None):
         super(ArvadosFileReader, self).__init__(name, mode, num_retries=num_retries)
-        self.arvadosfile = arvadosfile
+        self.arvadosfile = arvadosfile.clone()
 
     def size(self):
         return self.arvadosfile.size()
@@ -493,9 +596,23 @@ class ArvadosFileReader(ArvadosFileReaderBase):
     def flush(self):
         pass
 
+
+class SynchronizedArvadosFile(object):
+    def __init__(self, arvadosfile):
+        self.arvadosfile = arvadosfile
+
+    def clone(self):
+        return self
+
+    def __getattr__(self, name):
+        with self.arvadosfile.lock:
+            return getattr(self.arvadosfile, name)
+
+
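
A sketch of the proxy behaviour (continuing the StubParent example above, with
SynchronizedArvadosFile imported from the same module): attribute lookups on
the wrapper are performed while holding the underlying ArvadosFile's lock, and
clone() returns the wrapper itself rather than a copy, so a reader layered on
top of a writer shares the same file:

    af = ArvadosFile(parent)                 # 'parent' as in the sketch above
    saf = SynchronizedArvadosFile(af)
    assert saf.clone() is saf                # no copy, unlike ArvadosFile.clone()
    print(saf.size())                        # 'size' is looked up under af.lock, then called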
 class ArvadosFileWriter(ArvadosFileReader):
     def __init__(self, arvadosfile, name, mode, num_retries=None):
-        super(ArvadosFileWriter, self).__init__(arvadosfile, name, mode, num_retries=num_retries)
+        self.arvadosfile = SynchronizedArvadosFile(arvadosfile)
+        super(ArvadosFileWriter, self).__init__(self.arvadosfile, name, mode, num_retries=num_retries)
 
     @ArvadosFileBase._before_close
     @retry_method