import errno
import re
import logging
+import collections
from .errors import KeepWriteError, AssertionError, ArgumentError
from .keep import KeepLocator
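+ # Reuse any leftover line data cached by a previous readline() call when
+ # the file position has not moved since then.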
cache_pos, cache_data = self._readline_cache
if self.tell() == cache_pos:
data = [cache_data]
+ self._filepos += len(cache_data)
else:
data = ['']
data_size = len(data[-1])
except ValueError:
nextline_index = len(data)
nextline_index = min(nextline_index, size)
+ self._filepos -= len(data) - nextline_index
self._readline_cache = (self.tell(), data[nextline_index:])
return data[:nextline_index]
DEFAULT_PUT_THREADS = 2
DEFAULT_GET_THREADS = 2
- def __init__(self, keep):
+ def __init__(self, keep, copies=None):
"""keep: KeepClient object to use"""
self._keep = keep
- self._bufferblocks = {}
+ self._bufferblocks = collections.OrderedDict()
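+ # An OrderedDict keeps buffer blocks in creation order, which makes
+ # small-block repacking deterministic.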
self._put_queue = None
self._put_threads = None
self._prefetch_queue = None
self.prefetch_enabled = True
self.num_put_threads = _BlockManager.DEFAULT_PUT_THREADS
self.num_get_threads = _BlockManager.DEFAULT_GET_THREADS
+ self.copies = copies
@synchronized
def alloc_bufferblock(self, blockid=None, starting_capacity=2**14, owner=None):
if bufferblock is None:
return
- loc = self._keep.put(bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes())
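+ # Honor an explicit replication level when one was requested;
+ # otherwise let the Keep client use its own default.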
+ if self.copies is None:
+ loc = self._keep.put(bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes())
+ else:
+ loc = self._keep.put(bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes(), copies=self.copies)
bufferblock.set_state(_BufferBlock.COMMITTED, loc)
except Exception as e:
def __exit__(self, exc_type, exc_value, traceback):
self.stop_threads()
- def __del__(self):
- self.stop_threads()
+ @synchronized
+ def repack_small_blocks(self, force=False, sync=False):
+ """Packs small blocks together before uploading"""
+ # Search for blocks that are ready to be packed together before being
+ # committed to Keep.
+ small_blocks = [b for b in self._bufferblocks.values() if b.state() == _BufferBlock.WRITABLE and b.owner is not None and b.owner.closed()]
+ if len(small_blocks) <= 1:
+ # Not enough small blocks for repacking
+ return
+
+ # Check whether there are enough small blocks to fill up one full-sized block
+ pending_write_size = sum([b.size() for b in small_blocks])
+ if force or (pending_write_size >= config.KEEP_BLOCK_SIZE):
+ new_bb = _BufferBlock("bufferblock%i" % len(self._bufferblocks), 2**14, None)
+ self._bufferblocks[new_bb.blockid] = new_bb
+ size = 0
+ while len(small_blocks) > 0 and (size + small_blocks[0].size()) <= config.KEEP_BLOCK_SIZE:
+ bb = small_blocks.pop(0)
+ size += bb.size()
+ arvfile = bb.owner
+ new_bb.append(bb.buffer_view[0:bb.write_pointer].tobytes())
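+ # Repoint the file's segment list at its new location inside the
+ # packed block: its data starts at offset (size - bb.size()) within
+ # new_bb and spans bb.size() bytes.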
+ arvfile.set_segments([Range(new_bb.blockid, 0, bb.size(), size-bb.size())])
+ bb.clear()
+ del self._bufferblocks[bb.blockid]
+ self.commit_bufferblock(new_bb, sync=sync)
def commit_bufferblock(self, block, sync):
"""Initiate a background upload of a bufferblock.
which case it will wait on an upload queue slot.
"""
-
try:
# Mark the block as PENDING to disallow any more appends.
block.set_state(_BufferBlock.PENDING)
except StateChangeError as e:
- if e.state == _BufferBlock.PENDING and sync:
- block.wait_for_commit.wait()
- if block.state() == _BufferBlock.ERROR:
- raise block.error
- return
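+ # The block is already being committed by another thread. When sync is
+ # requested, wait for that upload to finish and then re-check the final
+ # state; a COMMITTED block needs no further work, an ERROR is re-raised.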
+ if e.state == _BufferBlock.PENDING:
+ if sync:
+ block.wait_for_commit.wait()
+ else:
+ return
+ if block.state() == _BufferBlock.COMMITTED:
+ return
+ elif block.state() == _BufferBlock.ERROR:
+ raise block.error
+ else:
+ raise
if sync:
try:
- loc = self._keep.put(block.buffer_view[0:block.write_pointer].tobytes())
+ if self.copies is None:
+ loc = self._keep.put(block.buffer_view[0:block.write_pointer].tobytes())
+ else:
+ loc = self._keep.put(block.buffer_view[0:block.write_pointer].tobytes(), copies=self.copies)
block.set_state(_BufferBlock.COMMITTED, loc)
except Exception as e:
block.set_state(_BufferBlock.ERROR, e)
are uploaded. Raises KeepWriteError() if any blocks failed to upload.
"""
+ self.repack_small_blocks(force=True, sync=True)
+
with self.lock:
items = self._bufferblocks.items()
for k,v in items:
- if v.state() != _BufferBlock.COMMITTED:
+ if v.state() != _BufferBlock.COMMITTED and v.owner:
v.owner.flush(sync=False)
with self.lock:
"""
self.parent = parent
self.name = name
+ self._writers = set()
self._committed = False
self._segments = []
self.lock = parent.root_collection().lock
def __ne__(self, other):
return not self.__eq__(other)
+ @synchronized
+ def set_segments(self, segs):
+ self._segments = segs
+
@synchronized
def set_committed(self):
- """Set committed flag to False"""
+ """Set committed flag to True"""
self._committed = True
@synchronized
"""Get whether this is committed or not."""
return self._committed
+ @synchronized
+ def add_writer(self, writer):
+ """Add an ArvadosFileWriter reference to the list of writers"""
+ if isinstance(writer, ArvadosFileWriter):
+ self._writers.add(writer)
+
+ @synchronized
+ def remove_writer(self, writer):
+ """
+ Called from ArvadosFileWriter.close(). Remove a writer reference from the list
+ and do some block maintenance tasks.
+ """
+ self._writers.remove(writer)
+
+ if self.size() > config.KEEP_BLOCK_SIZE / 2:
+ # File writer closed, not small enough for repacking
+ self.flush()
+ elif self.closed():
+ # All writers closed and size is adequate for repacking
+ self.parent._my_block_manager().repack_small_blocks()
+
+ def closed(self):
+ """
+ Get whether this is closed or not. When the writers set is empty, the file
+ is considered closed.
+ """
+ return len(self._writers) == 0
+
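+ # Illustrative writer lifecycle (a sketch; "foo.txt" and `collection` are
+ # hypothetical, assuming the usual Collection.open() API):
+ #
+ #   f = collection.open("foo.txt", "w")   # registers f via add_writer()
+ #   f.write("small file")
+ #   f.close()                             # remove_writer() runs; the file is
+ #                                         # small and has no other writers,
+ #                                         # so it is queued for repacking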
@must_be_writable
@synchronized
def truncate(self, size):
def __init__(self, arvadosfile, mode, num_retries=None):
super(ArvadosFileWriter, self).__init__(arvadosfile, num_retries=num_retries)
self.mode = mode
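+ # Register with the underlying ArvadosFile so it can track open writers
+ # and defer flushing until the last one is closed.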
+ self.arvadosfile.add_writer(self)
@_FileLikeObjectBase._before_close
@retry_method
def close(self):
if not self.closed:
- self.flush()
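+ # Deregistering triggers a flush or small-block repacking as needed, so
+ # an explicit flush() is no longer necessary here.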
+ self.arvadosfile.remove_writer(self)
super(ArvadosFileWriter, self).close()