self.num_put_threads = _BlockManager.DEFAULT_PUT_THREADS
self.num_get_threads = _BlockManager.DEFAULT_GET_THREADS
self.copies = copies
+ self._pending_write_size = 0
+ self.threads_lock = threading.Lock()
@synchronized
def alloc_bufferblock(self, blockid=None, starting_capacity=2**14, owner=None):
ArvadosFile that owns this block
"""
+ return self._alloc_bufferblock(blockid, starting_capacity, owner)
+
+ def _alloc_bufferblock(self, blockid=None, starting_capacity=2**14, owner=None):
if blockid is None:
blockid = "%s" % uuid.uuid4()
bufferblock = _BufferBlock(blockid, starting_capacity=starting_capacity, owner=owner)
if self._put_queue is not None:
self._put_queue.task_done()
- @synchronized
def start_put_threads(self):
- if self._put_threads is None:
- # Start uploader threads.
-
- # If we don't limit the Queue size, the upload queue can quickly
- # grow to take up gigabytes of RAM if the writing process is
- # generating data more quickly than it can be send to the Keep
- # servers.
- #
- # With two upload threads and a queue size of 2, this means up to 4
- # blocks pending. If they are full 64 MiB blocks, that means up to
- # 256 MiB of internal buffering, which is the same size as the
- # default download block cache in KeepClient.
- self._put_queue = Queue.Queue(maxsize=2)
-
- self._put_threads = []
- for i in xrange(0, self.num_put_threads):
- thread = threading.Thread(target=self._commit_bufferblock_worker)
- self._put_threads.append(thread)
- thread.daemon = True
- thread.start()
+ with self.threads_lock:
+ if self._put_threads is None:
+ # Start uploader threads.
+
+ # If we don't limit the Queue size, the upload queue can quickly
+ # grow to take up gigabytes of RAM if the writing process is
+ # generating data more quickly than it can be sent to the Keep
+ # servers.
+ #
+ # With two upload threads and a queue size of 2, this means up to 4
+ # blocks pending. If they are full 64 MiB blocks, that means up to
+ # 256 MiB of internal buffering, which is the same size as the
+ # default download block cache in KeepClient.
+ self._put_queue = Queue.Queue(maxsize=2)
+
+ self._put_threads = []
+ for i in xrange(0, self.num_put_threads):
+ thread = threading.Thread(target=self._commit_bufferblock_worker)
+ self._put_threads.append(thread)
+ thread.daemon = True
+ thread.start()
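The hunk above drops `@synchronized` from `start_put_threads` in favor of a dedicated `threads_lock`, so the uploader threads can be started lazily from code paths that already hold (or must not take) the manager-wide lock. A minimal sketch of the pattern, using hypothetical names (`Manager`, `_worker`) and assuming the manager-wide lock is a non-reentrant `threading.Lock`:

    import threading
    import Queue

    class Manager(object):
        def __init__(self):
            self.lock = threading.Lock()          # manager-wide, non-reentrant
            self.threads_lock = threading.Lock()  # guards thread startup only
            self._put_threads = None
            self._put_queue = None

        def _worker(self):
            # Drain the bounded queue; a None item is the shutdown signal.
            while True:
                item = self._put_queue.get()
                if item is None:
                    return
                # ... upload `item` to Keep here ...
                self._put_queue.task_done()

        def start_put_threads(self):
            # Check-then-start under its own narrow lock, so this is safe
            # to call even while self.lock is held elsewhere.
            with self.threads_lock:
                if self._put_threads is None:
                    self._put_queue = Queue.Queue(maxsize=2)
                    self._put_threads = []
                    for _ in xrange(2):
                        thread = threading.Thread(target=self._worker)
                        thread.daemon = True
                        thread.start()
                        self._put_threads.append(thread)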
def _block_prefetch_worker(self):
"""The background downloader thread."""
return
self._keep.get(b)
except Exception:
- pass
+ _logger.exception("Exception doing block prefetch")
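For context, the prefetch worker tolerates per-block failures by design: a failed prefetch only means the block will be fetched on demand later. The change above makes those failures visible in the logs instead of silently discarding them. A reconstructed sketch of the surrounding loop (the queue name `_prefetch_queue` is an assumption, not shown in this diff):

    def _block_prefetch_worker(self):
        """The background downloader thread."""
        while True:
            b = self._prefetch_queue.get()
            if b is None:
                return
            try:
                self._keep.get(b)
            except Exception:
                # Non-fatal: log and keep serving prefetch requests.
                _logger.exception("Exception doing block prefetch")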
@synchronized
def start_get_threads(self):
self.stop_threads()
@synchronized
- def repack_small_blocks(self, force=False, sync=False):
+ def repack_small_blocks(self, force=False, sync=False, closed_file_size=0):
"""Packs small blocks together before uploading"""
- # Search blocks ready for getting packed together before being committed to Keep.
- # A WRITABLE block always has an owner.
- # A WRITABLE block with its owner.closed() implies that it's
- # size is <= KEEP_BLOCK_SIZE/2.
- small_blocks = [b for b in self._bufferblocks.values() if b.state() == _BufferBlock.WRITABLE and b.owner.closed()]
- if len(small_blocks) <= 1:
- # Not enough small blocks for repacking
- return
+ self._pending_write_size += closed_file_size
# Check if there are enough small blocks for filling up one in full
- pending_write_size = sum([b.size() for b in small_blocks])
- if force or (pending_write_size >= config.KEEP_BLOCK_SIZE):
- new_bb = _BufferBlock("%s" % uuid.uuid4(), 2**14, None)
- self._bufferblocks[new_bb.blockid] = new_bb
+ if force or (self._pending_write_size >= config.KEEP_BLOCK_SIZE):
+
+ # Search for blocks that are ready to be packed together before being committed to Keep.
+ # A WRITABLE block always has an owner.
+ # A WRITABLE block with its owner.closed() implies that its
+ # size is <= KEEP_BLOCK_SIZE/2.
+ small_blocks = [b for b in self._bufferblocks.values() if b.state() == _BufferBlock.WRITABLE and b.owner.closed()]
+
+ if len(small_blocks) <= 1:
+ # Not enough small blocks for repacking
+ return
+
+ # Update the pending write size count with its true value, just in case
+ # some small file was opened, written and closed several times.
+ self._pending_write_size = sum([b.size() for b in small_blocks])
+ if self._pending_write_size < config.KEEP_BLOCK_SIZE and not force:
+ return
+
+ new_bb = self._alloc_bufferblock()
while len(small_blocks) > 0 and (new_bb.write_pointer + small_blocks[0].size()) <= config.KEEP_BLOCK_SIZE:
bb = small_blocks.pop(0)
arvfile = bb.owner
+ self._pending_write_size -= bb.size()
new_bb.append(bb.buffer_view[0:bb.write_pointer].tobytes())
arvfile.set_segments([Range(new_bb.blockid,
0,
bb.size(),
new_bb.write_pointer - bb.size())])
- bb.clear()
- del self._bufferblocks[bb.blockid]
+ self._delete_bufferblock(bb.blockid)
self.commit_bufferblock(new_bb, sync=sync)
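After the repacking loop above, each small file keeps its contents, but its single segment now points into the shared block; the offset passed to `Range` is `new_bb.write_pointer - bb.size()`, i.e. where that file's bytes begin inside the packed block. A worked example with hypothetical sizes, packing a 10-byte file `a` followed by a 20-byte file `b`:

    # After appending a's bytes: new_bb.write_pointer == 10
    a.set_segments([Range(new_bb.blockid, 0, 10, 0)])    # offset 10 - 10 == 0
    # After appending b's bytes: new_bb.write_pointer == 30
    b.set_segments([Range(new_bb.blockid, 0, 20, 10)])   # offset 30 - 20 == 10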
def commit_bufferblock(self, block, sync):
@synchronized
def delete_bufferblock(self, locator):
+ self._delete_bufferblock(locator)
+
+ def _delete_bufferblock(self, locator):
bb = self._bufferblocks[locator]
bb.clear()
del self._bufferblocks[locator]
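`delete_bufferblock` (and `alloc_bufferblock` earlier in this diff) now follows a lock-splitting pattern: the public method keeps `@synchronized`, while the underscore variant holds the actual logic, so other `@synchronized` methods such as `repack_small_blocks` can reuse it while already holding the lock. With a non-reentrant lock, calling the decorated public method from inside another decorated method would deadlock. A self-contained sketch of the pattern (hypothetical `Manager` class; the `synchronized` decorator is assumed to simply take `self.lock`):

    import functools
    import threading

    def synchronized(orig_func):
        @functools.wraps(orig_func)
        def wrapper(self, *args, **kwargs):
            with self.lock:
                return orig_func(self, *args, **kwargs)
        return wrapper

    class Manager(object):
        def __init__(self):
            self.lock = threading.Lock()  # non-reentrant
            self._bufferblocks = {}

        @synchronized
        def delete_bufferblock(self, locator):
            # Public entry point: acquires the lock, then delegates.
            self._delete_bufferblock(locator)

        def _delete_bufferblock(self, locator):
            # Internal variant: the caller must already hold self.lock.
            del self._bufferblocks[locator]

        @synchronized
        def repack(self):
            # Already inside the lock, so it must use the private variant;
            # calling delete_bufferblock() here would deadlock.
            for locator in list(self._bufferblocks):
                self._delete_bufferblock(locator)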
self._writers.add(writer)
@synchronized
- def remove_writer(self, writer):
+ def remove_writer(self, writer, flush):
"""
Called from ArvadosFileWriter.close(). Remove a writer reference from the list
and do some block maintenance tasks.
"""
self._writers.remove(writer)
- if self.size() > config.KEEP_BLOCK_SIZE / 2:
+ if flush or self.size() > config.KEEP_BLOCK_SIZE / 2:
# File writer closed, not small enough for repacking
self.flush()
elif self.closed():
# All writers closed and size is adequate for repacking
- self.parent._my_block_manager().repack_small_blocks()
+ self.parent._my_block_manager().repack_small_blocks(closed_file_size=self.size())
def closed(self):
"""
def flush(self):
self.arvadosfile.flush()
- def close(self):
+ def close(self, flush=True):
if not self.closed:
- self.arvadosfile.remove_writer(self)
+ self.arvadosfile.remove_writer(self, flush)
super(ArvadosFileWriter, self).close()
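Taken together, the new `flush` parameter lets callers that write many small files close each one with `flush=False`, leaving its buffer block WRITABLE so `repack_small_blocks` can coalesce several files into a single Keep block; the default `flush=True` preserves the old commit-on-close behavior. A hypothetical usage sketch (file names and payloads invented for illustration):

    import arvados.collection

    c = arvados.collection.Collection()
    for i in xrange(100):
        writer = c.open("small-%03d.txt" % i, "w")
        writer.write("tiny payload %d\n" % i)
        writer.close(flush=False)  # defer flush so the block can be repacked
    c.save_new()  # remaining packed blocks are committed with the collection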