manifest_list = []
-chunking = False #arvados.getjobparam('chunking')
-
def nextline(reader, start):
    # Return the offset of the next newline relative to 'start', or -1 at EOF.
    scanned = 0
    while True:
        r = reader.readfrom(start + scanned, 128)
        if r == '':
            return -1
        n = r.find("\n")
        if n > -1:
            return scanned + n
        scanned += 128
-# Chunk a fastq into approximately 64 MiB chunks. Requires that the input data
-# be decompressed ahead of time, such as using decompress-all.py. Generates a
-# new manifest, but doesn't actually move any data around. Handles paired
-# reads by ensuring that each chunk of a pair gets the same number of records.
-#
-# This works, but in practice is so slow that potential gains in alignment
-# performance are lost in the prep time, which is why it is currently disabled.
-#
-# A better algorithm would seek to a file position a bit less than the desired
-# chunk size and then scan ahead for the next record, making sure that record
-# was matched by the read pair.
-def splitfastq(p):
- for i in xrange(0, len(p)):
- p[i]["start"] = 0
- p[i]["end"] = 0
-
- count = 0
- recordsize = [0, 0]
-
- global piece
- finish = False
- while not finish:
- for i in xrange(0, len(p)):
- recordsize[i] = 0
-
- # read next 4 lines
- for i in xrange(0, len(p)):
- for ln in xrange(0, 4):
- r = nextline(p[i]["reader"], p[i]["end"]+recordsize[i])
- if r == -1:
- finish = True
- break
- recordsize[i] += (r+1)
-
- splitnow = finish
- for i in xrange(0, len(p)):
- if ((p[i]["end"] - p[i]["start"]) + recordsize[i]) >= (64*1024*1024):
- splitnow = True
-
- if splitnow:
- for i in xrange(0, len(p)):
- global manifest_list
- print >>sys.stderr, "Finish piece ./_%s/%s (%s %s)" % (piece, p[i]["reader"].name(), p[i]["start"], p[i]["end"])
- manifest = []
- manifest.extend(["./_" + str(piece)])
- manifest.extend([d[arvados.LOCATOR] for d in p[i]["reader"]._stream._data_locators])
- manifest.extend(["{}:{}:{}".format(seg[arvados.LOCATOR]+seg[arvados.OFFSET], seg[arvados.SEGMENTSIZE], p[i]["reader"].name().replace(' ', '\\040')) for seg in arvados.locators_and_ranges(p[i]["reader"].segments, p[i]["start"], p[i]["end"] - p[i]["start"])])
- manifest_list.append(manifest)
- p[i]["start"] = p[i]["end"]
- piece += 1
- else:
- for i in xrange(0, len(p)):
- p[i]["end"] += recordsize[i]
- count += 1
- if count % 10000 == 0:
- print >>sys.stderr, "Record %s at %s" % (count, p[i]["end"])
-
prog = re.compile(r'(.*?)(_[12])?\.fastq(\.gz)?$')
# Look for fastq files
p[0]["reader"] = s.files()[name_pieces.group(0)]
if p is not None:
- if chunking:
- splitfastq(p)
- else:
- for i in xrange(0, len(p)):
- m = p[i]["reader"].as_manifest().split()
- m[0] = "./_" + str(piece)
- manifest_list.append(m)
- piece += 1
+ for i in xrange(0, len(p)):
+ m = p[i]["reader"].as_manifest().split()
+ m[0] = "./_" + str(piece)
+ manifest_list.append(m)
+ piece += 1
manifest_text = "\n".join(" ".join(m) for m in manifest_list) + "\n"
import time
import threading
-from api import *
-from collection import *
+from .api import api, http_cache
+from collection import CollectionReader, CollectionWriter, ResumableCollectionWriter
from keep import *
from stream import *
+from arvfile import StreamFileReader
import errors
import util
body={'success':True}
).execute()
exit(0)
-
-
--- /dev/null
+import config
+
+def normalize_stream(stream_name, stream):
+ """Take manifest stream and return a list of tokens in normalized format.
+
+ :stream_name:
+ The name of the stream.
+
+ :stream:
+      A dict mapping each filename to a list of `_ranges.LocatorAndRange` objects.
+
+ """
+
+ stream_name = stream_name.replace(' ', '\\040')
+ stream_tokens = [stream_name]
+ sortedfiles = list(stream.keys())
+ sortedfiles.sort()
+
+ blocks = {}
+ streamoffset = 0L
+ # Go through each file and add each referenced block exactly once.
+ for streamfile in sortedfiles:
+ for segment in stream[streamfile]:
+ if segment.locator not in blocks:
+ stream_tokens.append(segment.locator)
+ blocks[segment.locator] = streamoffset
+ streamoffset += segment.block_size
+
+ # Add the empty block if the stream is otherwise empty.
+ if len(stream_tokens) == 1:
+ stream_tokens.append(config.EMPTY_BLOCK_LOCATOR)
+
+ for streamfile in sortedfiles:
+ # Add in file segments
+ current_span = None
+ fout = streamfile.replace(' ', '\\040')
+ for segment in stream[streamfile]:
+ # Collapse adjacent segments
+ streamoffset = blocks[segment.locator] + segment.segment_offset
+ if current_span is None:
+ current_span = [streamoffset, streamoffset + segment.segment_size]
+ else:
+ if streamoffset == current_span[1]:
+ current_span[1] += segment.segment_size
+ else:
+ stream_tokens.append("{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
+ current_span = [streamoffset, streamoffset + segment.segment_size]
+
+ if current_span is not None:
+ stream_tokens.append("{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
+
+ if not stream[streamfile]:
+ stream_tokens.append("0:0:{0}".format(fout))
+
+ return stream_tokens
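+
+# A usage sketch (illustrative, not part of this commit); the locator below is
+# md5("foo") with a +3 size hint, and LocatorAndRange comes from _ranges:
+#
+#   stream = {"foo.txt": [LocatorAndRange("acbd18db4cc2f85cedef654fccc4a4d8+3", 3, 0, 3)]}
+#   " ".join(normalize_stream(".", stream))
+#   # -> '. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt'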
--- /dev/null
+import logging
+
+_logger = logging.getLogger('arvados.ranges')
+
+class Range(object):
+ def __init__(self, locator, range_start, range_size, segment_offset=0):
+ self.locator = locator
+ self.range_start = range_start
+ self.range_size = range_size
+ self.segment_offset = segment_offset
+
+ def __repr__(self):
+ return "Range(%r, %r, %r, %r)" % (self.locator, self.range_start, self.range_size, self.segment_offset)
+
+ def __eq__(self, other):
+ return (self.locator == other.locator and
+ self.range_start == other.range_start and
+ self.range_size == other.range_size and
+ self.segment_offset == other.segment_offset)
+
+def first_block(data_locators, range_start, range_size):
+ block_start = 0L
+
+ # range_start/block_start is the inclusive lower bound
+ # range_end/block_end is the exclusive upper bound
+
+ hi = len(data_locators)
+ lo = 0
+ i = int((hi + lo) / 2)
+ block_size = data_locators[i].range_size
+ block_start = data_locators[i].range_start
+ block_end = block_start + block_size
+
+ # perform a binary search for the first block
+    # assumes that all of the blocks are contiguous, so range_start is guaranteed
+ # to either fall into the range of a block or be outside the block range entirely
+ while not (range_start >= block_start and range_start < block_end):
+ if lo == i:
+ # must be out of range, fail
+ return None
+ if range_start > block_start:
+ lo = i
+ else:
+ hi = i
+ i = int((hi + lo) / 2)
+ block_size = data_locators[i].range_size
+ block_start = data_locators[i].range_start
+ block_end = block_start + block_size
+
+ return i
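+
+# For example (illustrative):
+#
+#   first_block([Range("a", 0, 10), Range("b", 10, 10)], 15, 1)  # -> 1
+#   first_block([Range("a", 0, 10), Range("b", 10, 10)], 25, 1)  # -> None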
+
+class LocatorAndRange(object):
+ def __init__(self, locator, block_size, segment_offset, segment_size):
+ self.locator = locator
+ self.block_size = block_size
+ self.segment_offset = segment_offset
+ self.segment_size = segment_size
+
+ def __eq__(self, other):
+ return (self.locator == other.locator and
+ self.block_size == other.block_size and
+ self.segment_offset == other.segment_offset and
+ self.segment_size == other.segment_size)
+
+ def __repr__(self):
+ return "LocatorAndRange(%r, %r, %r, %r)" % (self.locator, self.block_size, self.segment_offset, self.segment_size)
+
+def locators_and_ranges(data_locators, range_start, range_size):
+ """Get blocks that are covered by the range and return list of LocatorAndRange
+ objects.
+
+ :data_locators:
+      list of Range objects, assumes that blocks are in order and contiguous
+
+ :range_start:
+ start of range
+
+ :range_size:
+ size of range
+
+ """
+ if range_size == 0:
+ return []
+    resp = []
+    range_end = range_start + range_size
+
+ i = first_block(data_locators, range_start, range_size)
+ if i is None:
+ return []
+
+ # We should always start at the first segment due to the binary
+ # search.
+ while i < len(data_locators):
+ dl = data_locators[i]
+ block_start = dl.range_start
+ block_size = dl.range_size
+ block_end = block_start + block_size
+        _logger.debug("%r range_start %r block_start %r range_end %r block_end %r",
+                      dl.locator, range_start, block_start, range_end, block_end)
+ if range_end <= block_start:
+ # range ends before this block starts, so don't look at any more locators
+ break
+
+ if range_start >= block_start and range_end <= block_end:
+ # range starts and ends in this block
+ resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset + (range_start - block_start), range_size))
+ elif range_start >= block_start and range_end > block_end:
+ # range starts in this block
+ resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset + (range_start - block_start), block_end - range_start))
+ elif range_start < block_start and range_end > block_end:
+ # range starts in a previous block and extends to further blocks
+ resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset, block_size))
+ elif range_start < block_start and range_end <= block_end:
+ # range starts in a previous block and ends in this block
+ resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset, range_end - block_start))
+ block_start = block_end
+ i += 1
+ return resp
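+
+# For example (illustrative), with two adjacent 10-byte blocks "a" and "b":
+#
+#   data_locators = [Range("a", 0, 10), Range("b", 10, 10)]
+#   locators_and_ranges(data_locators, 5, 10)
+#   # -> [LocatorAndRange("a", 10, 5, 5), LocatorAndRange("b", 10, 0, 5)]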
+
+def replace_range(data_locators, new_range_start, new_range_size, new_locator, new_segment_offset):
+ """
+ Replace a file segment range with a new segment.
+
+ NOTE::
+ data_locators will be updated in place
+
+ :data_locators:
+      list of Range objects, assumes that segments are in order and contiguous
+
+ :new_range_start:
+ start of range to replace in data_locators
+
+ :new_range_size:
+ size of range to replace in data_locators
+
+ :new_locator:
+ locator for new segment to be inserted
+
+ :new_segment_offset:
+ segment offset within the locator
+
+ """
+ if new_range_size == 0:
+ return
+
+    new_range_end = new_range_start + new_range_size
+
+ if len(data_locators) == 0:
+ data_locators.append(Range(new_locator, new_range_start, new_range_size, new_segment_offset))
+ return
+
+ last = data_locators[-1]
+ if (last.range_start+last.range_size) == new_range_start:
+ if last.locator == new_locator:
+ # extend last segment
+ last.range_size += new_range_size
+ else:
+ data_locators.append(Range(new_locator, new_range_start, new_range_size, new_segment_offset))
+ return
+
+ i = first_block(data_locators, new_range_start, new_range_size)
+ if i is None:
+ return
+
+ # We should always start at the first segment due to the binary
+ # search.
+ while i < len(data_locators):
+ dl = data_locators[i]
+ old_segment_start = dl.range_start
+ old_segment_end = old_segment_start + dl.range_size
+        _logger.debug("%r range_start %r segment_start %r range_end %r segment_end %r",
+                      dl, new_range_start, old_segment_start, new_range_end, old_segment_end)
+ if new_range_end <= old_segment_start:
+ # range ends before this segment starts, so don't look at any more locators
+ break
+
+ if old_segment_start <= new_range_start and new_range_end <= old_segment_end:
+ # new range starts and ends in old segment
+ # split segment into up to 3 pieces
+ if (new_range_start-old_segment_start) > 0:
+ data_locators[i] = Range(dl.locator, old_segment_start, (new_range_start-old_segment_start), dl.segment_offset)
+ data_locators.insert(i+1, Range(new_locator, new_range_start, new_range_size, new_segment_offset))
+ else:
+ data_locators[i] = Range(new_locator, new_range_start, new_range_size, new_segment_offset)
+ i -= 1
+ if (old_segment_end-new_range_end) > 0:
+ data_locators.insert(i+2, Range(dl.locator, new_range_end, (old_segment_end-new_range_end), dl.segment_offset + (new_range_start-old_segment_start) + new_range_size))
+ return
+ elif old_segment_start <= new_range_start and new_range_end > old_segment_end:
+ # range starts in this segment
+ # split segment into 2 pieces
+ data_locators[i] = Range(dl.locator, old_segment_start, (new_range_start-old_segment_start), dl.segment_offset)
+ data_locators.insert(i+1, Range(new_locator, new_range_start, new_range_size, new_segment_offset))
+ i += 1
+ elif new_range_start < old_segment_start and new_range_end >= old_segment_end:
+ # range starts in a previous segment and extends to further segments
+ # delete this segment
+ del data_locators[i]
+ i -= 1
+ elif new_range_start < old_segment_start and new_range_end < old_segment_end:
+ # range starts in a previous segment and ends in this segment
+ # move the starting point of this segment up, and shrink it.
+ data_locators[i] = Range(dl.locator, new_range_end, (old_segment_end-new_range_end), dl.segment_offset + (new_range_end-old_segment_start))
+ return
+ i += 1
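+
+# For example (illustrative): replacing bytes [2, 7) of a single 10-byte
+# segment "a" with new data in block "b" splits the original into three:
+#
+#   dl = [Range("a", 0, 10)]
+#   replace_range(dl, 2, 5, "b", 0)
+#   # dl -> [Range("a", 0, 2, 0), Range("b", 2, 5, 0), Range("a", 7, 3, 7)]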
def api(version=None, cache=True, host=None, token=None, insecure=False, **kwargs):
"""Return an apiclient Resources object for an Arvados instance.
- Arguments:
- * version: A string naming the version of the Arvados API to use (for
+ :version:
+ A string naming the version of the Arvados API to use (for
example, 'v1').
- * cache: Use a cache (~/.cache/arvados/discovery) for the discovery
+
+ :cache:
+ Use a cache (~/.cache/arvados/discovery) for the discovery
document.
- * host: The Arvados API server host (and optional :port) to connect to.
- * token: The authentication token to send with each API call.
- * insecure: If True, ignore SSL certificate validation errors.
+
+ :host:
+ The Arvados API server host (and optional :port) to connect to.
+
+ :token:
+ The authentication token to send with each API call.
+
+ :insecure:
+ If True, ignore SSL certificate validation errors.
Additional keyword arguments will be passed directly to
`apiclient_discovery.build` if a new Resource object is created.
elif host and token:
pass
elif not host and not token:
- # Load from user configuration or environment
- for x in ['ARVADOS_API_HOST', 'ARVADOS_API_TOKEN']:
- if x not in config.settings():
- raise ValueError("%s is not set. Aborting." % x)
- host = config.get('ARVADOS_API_HOST')
- token = config.get('ARVADOS_API_TOKEN')
- insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE')
+ return api_from_config(version=version, cache=cache, **kwargs)
else:
# Caller provided one but not the other
if not host:
svc.api_token = token
kwargs['http'].cache = None
return svc
+
+def api_from_config(version=None, apiconfig=None, **kwargs):
+ """Return an apiclient Resources object enabling access to an Arvados server
+ instance.
+
+ :version:
+ A string naming the version of the Arvados REST API to use (for
+ example, 'v1').
+
+ :cache:
+ Use a cache (~/.cache/arvados/discovery) for the discovery
+ document.
+
+ :apiconfig:
+ If provided, this should be a dict-like object (must support the get()
+ method) with entries for ARVADOS_API_HOST, ARVADOS_API_TOKEN, and
+ optionally ARVADOS_API_HOST_INSECURE. If not provided, use
+ arvados.config (which gets these parameters from the environment by
+ default.)
+
+ """
+ # Load from user configuration or environment
+ if apiconfig is None:
+ apiconfig = config.settings()
+
+ for x in ['ARVADOS_API_HOST', 'ARVADOS_API_TOKEN']:
+ if x not in apiconfig:
+ raise ValueError("%s is not set. Aborting." % x)
+ host = apiconfig.get('ARVADOS_API_HOST')
+ token = apiconfig.get('ARVADOS_API_TOKEN')
+ insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE', apiconfig)
+
+ return api(version=version, host=host, token=token, insecure=insecure, **kwargs)
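+
+# Usage sketch (illustrative; the host and token below are placeholders):
+#
+#   client = api_from_config('v1', apiconfig={
+#       'ARVADOS_API_HOST': 'zzzzz.arvadosapi.com',
+#       'ARVADOS_API_TOKEN': 'token-placeholder',
+#   })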
import functools
+import os
+import re
+import zlib
+import bz2
+from ._ranges import locators_and_ranges, replace_range, Range, LocatorAndRange
+from arvados.retry import retry_method
+import config
+import hashlib
+import threading
+import Queue
+import copy
+import errno
+from .errors import KeepWriteError, AssertionError, ArgumentError
+from .keep import KeepLocator
+from ._normalize_stream import normalize_stream
-class ArvadosFileBase(object):
+def split(path):
+ """split(path) -> streamname, filename
+
+ Separate the stream name and file name in a /-separated stream path and
+ return a tuple (stream_name, file_name). If no stream name is available,
+ assume '.'.
+
+ """
+ try:
+ stream_name, file_name = path.rsplit('/', 1)
+ except ValueError: # No / in string
+ stream_name, file_name = '.', path
+ return stream_name, file_name
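+
+# For example: split("a/b/c.txt") -> ("a/b", "c.txt"); split("c.txt") -> (".", "c.txt")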
+
+class _FileLikeObjectBase(object):
def __init__(self, name, mode):
self.name = name
self.mode = mode
@staticmethod
def _before_close(orig_func):
@functools.wraps(orig_func)
- def wrapper(self, *args, **kwargs):
+ def before_close_wrapper(self, *args, **kwargs):
if self.closed:
raise ValueError("I/O operation on closed stream file")
return orig_func(self, *args, **kwargs)
- return wrapper
+ return before_close_wrapper
def __enter__(self):
return self
def close(self):
self.closed = True
+
+
+class ArvadosFileReaderBase(_FileLikeObjectBase):
+ def __init__(self, name, mode, num_retries=None):
+ super(ArvadosFileReaderBase, self).__init__(name, mode)
+ self._filepos = 0L
+ self.num_retries = num_retries
+ self._readline_cache = (None, None)
+
+ def __iter__(self):
+ while True:
+ data = self.readline()
+ if not data:
+ break
+ yield data
+
+ def decompressed_name(self):
+        return re.sub(r'\.(bz2|gz)$', '', self.name)
+
+ @_FileLikeObjectBase._before_close
+ def seek(self, pos, whence=os.SEEK_CUR):
+ if whence == os.SEEK_CUR:
+ pos += self._filepos
+ elif whence == os.SEEK_END:
+ pos += self.size()
+ self._filepos = min(max(pos, 0L), self.size())
+
+ def tell(self):
+ return self._filepos
+
+ @_FileLikeObjectBase._before_close
+ @retry_method
+ def readall(self, size=2**20, num_retries=None):
+ while True:
+ data = self.read(size, num_retries=num_retries)
+ if data == '':
+ break
+ yield data
+
+ @_FileLikeObjectBase._before_close
+ @retry_method
+ def readline(self, size=float('inf'), num_retries=None):
+ cache_pos, cache_data = self._readline_cache
+ if self.tell() == cache_pos:
+ data = [cache_data]
+ else:
+ data = ['']
+ data_size = len(data[-1])
+ while (data_size < size) and ('\n' not in data[-1]):
+ next_read = self.read(2 ** 20, num_retries=num_retries)
+ if not next_read:
+ break
+ data.append(next_read)
+ data_size += len(next_read)
+ data = ''.join(data)
+ try:
+ nextline_index = data.index('\n') + 1
+ except ValueError:
+ nextline_index = len(data)
+ nextline_index = min(nextline_index, size)
+ self._readline_cache = (self.tell(), data[nextline_index:])
+ return data[:nextline_index]
+
+ @_FileLikeObjectBase._before_close
+ @retry_method
+ def decompress(self, decompress, size, num_retries=None):
+ for segment in self.readall(size, num_retries):
+ data = decompress(segment)
+ if data:
+ yield data
+
+ @_FileLikeObjectBase._before_close
+ @retry_method
+ def readall_decompressed(self, size=2**20, num_retries=None):
+ self.seek(0)
+ if self.name.endswith('.bz2'):
+ dc = bz2.BZ2Decompressor()
+ return self.decompress(dc.decompress, size,
+ num_retries=num_retries)
+ elif self.name.endswith('.gz'):
+ dc = zlib.decompressobj(16+zlib.MAX_WBITS)
+ return self.decompress(lambda segment: dc.decompress(dc.unconsumed_tail + segment),
+ size, num_retries=num_retries)
+ else:
+ return self.readall(size, num_retries=num_retries)
+
+ @_FileLikeObjectBase._before_close
+ @retry_method
+ def readlines(self, sizehint=float('inf'), num_retries=None):
+ data = []
+ data_size = 0
+ for s in self.readall(num_retries=num_retries):
+ data.append(s)
+ data_size += len(s)
+ if data_size >= sizehint:
+ break
+ return ''.join(data).splitlines(True)
+
+ def size(self):
+ raise NotImplementedError()
+
+ def read(self, size, num_retries=None):
+ raise NotImplementedError()
+
+ def readfrom(self, start, size, num_retries=None):
+ raise NotImplementedError()
+
+
+class StreamFileReader(ArvadosFileReaderBase):
+ class _NameAttribute(str):
+ # The Python file API provides a plain .name attribute.
+ # Older SDK provided a name() method.
+ # This class provides both, for maximum compatibility.
+ def __call__(self):
+ return self
+
+ def __init__(self, stream, segments, name):
+ super(StreamFileReader, self).__init__(self._NameAttribute(name), 'rb', num_retries=stream.num_retries)
+ self._stream = stream
+ self.segments = segments
+
+ def stream_name(self):
+ return self._stream.name()
+
+ def size(self):
+ n = self.segments[-1]
+ return n.range_start + n.range_size
+
+ @_FileLikeObjectBase._before_close
+ @retry_method
+ def read(self, size, num_retries=None):
+ """Read up to 'size' bytes from the stream, starting at the current file position"""
+ if size == 0:
+ return ''
+
+ data = ''
+ available_chunks = locators_and_ranges(self.segments, self._filepos, size)
+ if available_chunks:
+ lr = available_chunks[0]
+ data = self._stream.readfrom(lr.locator+lr.segment_offset,
+ lr.segment_size,
+ num_retries=num_retries)
+
+ self._filepos += len(data)
+ return data
+
+ @_FileLikeObjectBase._before_close
+ @retry_method
+ def readfrom(self, start, size, num_retries=None):
+ """Read up to 'size' bytes from the stream, starting at 'start'"""
+ if size == 0:
+ return ''
+
+ data = []
+ for lr in locators_and_ranges(self.segments, start, size):
+ data.append(self._stream.readfrom(lr.locator+lr.segment_offset, lr.segment_size,
+ num_retries=num_retries))
+ return ''.join(data)
+
+ def as_manifest(self):
+ segs = []
+ for r in self.segments:
+ segs.extend(self._stream.locators_and_ranges(r.locator, r.range_size))
+ return " ".join(normalize_stream(".", {self.name: segs})) + "\n"
+
+
+def synchronized(orig_func):
+ @functools.wraps(orig_func)
+ def synchronized_wrapper(self, *args, **kwargs):
+ with self.lock:
+ return orig_func(self, *args, **kwargs)
+ return synchronized_wrapper
+
+class _BufferBlock(object):
+ """A stand-in for a Keep block that is in the process of being written.
+
+ Writers can append to it, get the size, and compute the Keep locator.
+ There are three valid states:
+
+ WRITABLE
+ Can append to block.
+
+ PENDING
+ Block is in the process of being uploaded to Keep, append is an error.
+
+ COMMITTED
+ The block has been written to Keep, its internal buffer has been
+ released, fetching the block will fetch it via keep client (since we
+ discarded the internal copy), and identifiers referring to the BufferBlock
+ can be replaced with the block locator.
+
+ """
+
+ WRITABLE = 0
+ PENDING = 1
+ COMMITTED = 2
+
+ def __init__(self, blockid, starting_capacity, owner):
+ """
+ :blockid:
+ the identifier for this block
+
+ :starting_capacity:
+ the initial buffer capacity
+
+ :owner:
+ ArvadosFile that owns this block
+
+ """
+ self.blockid = blockid
+ self.buffer_block = bytearray(starting_capacity)
+ self.buffer_view = memoryview(self.buffer_block)
+ self.write_pointer = 0
+ self._state = _BufferBlock.WRITABLE
+ self._locator = None
+ self.owner = owner
+ self.lock = threading.Lock()
+
+ @synchronized
+ def append(self, data):
+ """Append some data to the buffer.
+
+ Only valid if the block is in WRITABLE state. Implements an expanding
+        buffer, doubling capacity as needed to accommodate all the data.
+
+ """
+ if self._state == _BufferBlock.WRITABLE:
+ while (self.write_pointer+len(data)) > len(self.buffer_block):
+ new_buffer_block = bytearray(len(self.buffer_block) * 2)
+ new_buffer_block[0:self.write_pointer] = self.buffer_block[0:self.write_pointer]
+ self.buffer_block = new_buffer_block
+ self.buffer_view = memoryview(self.buffer_block)
+ self.buffer_view[self.write_pointer:self.write_pointer+len(data)] = data
+ self.write_pointer += len(data)
+ self._locator = None
+ else:
+ raise AssertionError("Buffer block is not writable")
+
+ @synchronized
+ def set_state(self, nextstate, loc=None):
+ if ((self._state == _BufferBlock.WRITABLE and nextstate == _BufferBlock.PENDING) or
+ (self._state == _BufferBlock.PENDING and nextstate == _BufferBlock.COMMITTED)):
+ self._state = nextstate
+ if self._state == _BufferBlock.COMMITTED:
+ self._locator = loc
+ self.buffer_view = None
+ self.buffer_block = None
+ else:
+            raise AssertionError("Invalid state change from %s to %s" % (self._state, nextstate))
+
+ @synchronized
+ def state(self):
+ return self._state
+
+ def size(self):
+ """The amount of data written to the buffer."""
+ return self.write_pointer
+
+ @synchronized
+ def locator(self):
+ """The Keep locator for this buffer's contents."""
+ if self._locator is None:
+ self._locator = "%s+%i" % (hashlib.md5(self.buffer_view[0:self.write_pointer]).hexdigest(), self.size())
+ return self._locator
+
+ @synchronized
+ def clone(self, new_blockid, owner):
+ if self._state == _BufferBlock.COMMITTED:
+ raise AssertionError("Can only duplicate a writable or pending buffer block")
+ bufferblock = _BufferBlock(new_blockid, self.size(), owner)
+ bufferblock.append(self.buffer_view[0:self.size()])
+ return bufferblock
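+
+# Lifecycle sketch (illustrative, not part of this commit); "hello" hashes to
+# the locator shown:
+#
+#   bb = _BufferBlock("bufferblock0", starting_capacity=2**14, owner=None)
+#   bb.append("hello")
+#   bb.locator()        # -> "5d41402abc4b2a76b9719d911017c592+5"
+#   bb.set_state(_BufferBlock.PENDING)
+#   bb.set_state(_BufferBlock.COMMITTED, "5d41402abc4b2a76b9719d911017c592+5")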
+
+
+class NoopLock(object):
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ pass
+
+ def acquire(self, blocking=False):
+ pass
+
+ def release(self):
+ pass
+
+def must_be_writable(orig_func):
+ @functools.wraps(orig_func)
+ def must_be_writable_wrapper(self, *args, **kwargs):
+ if not self.writable():
+            raise IOError(errno.EROFS, "Collection must be writable.")
+ return orig_func(self, *args, **kwargs)
+ return must_be_writable_wrapper
+
+
+class _BlockManager(object):
+ """BlockManager handles buffer blocks.
+
+ Also handles background block uploads, and background block prefetch for a
+ Collection of ArvadosFiles.
+
+ """
+ def __init__(self, keep):
+ """keep: KeepClient object to use"""
+ self._keep = keep
+ self._bufferblocks = {}
+ self._put_queue = None
+ self._put_errors = None
+ self._put_threads = None
+ self._prefetch_queue = None
+ self._prefetch_threads = None
+ self.lock = threading.Lock()
+ self.prefetch_enabled = True
+ self.num_put_threads = 2
+ self.num_get_threads = 2
+
+ @synchronized
+ def alloc_bufferblock(self, blockid=None, starting_capacity=2**14, owner=None):
+ """Allocate a new, empty bufferblock in WRITABLE state and return it.
+
+ :blockid:
+ optional block identifier, otherwise one will be automatically assigned
+
+ :starting_capacity:
+ optional capacity, otherwise will use default capacity
+
+ :owner:
+ ArvadosFile that owns this block
+
+ """
+ if blockid is None:
+ blockid = "bufferblock%i" % len(self._bufferblocks)
+ bufferblock = _BufferBlock(blockid, starting_capacity=starting_capacity, owner=owner)
+ self._bufferblocks[bufferblock.blockid] = bufferblock
+ return bufferblock
+
+ @synchronized
+ def dup_block(self, block, owner):
+ """Create a new bufferblock initialized with the content of an existing bufferblock.
+
+ :block:
+ the buffer block to copy.
+
+ :owner:
+ ArvadosFile that owns the new block
+
+ """
+ new_blockid = "bufferblock%i" % len(self._bufferblocks)
+ bufferblock = block.clone(new_blockid, owner)
+ self._bufferblocks[bufferblock.blockid] = bufferblock
+ return bufferblock
+
+ @synchronized
+ def is_bufferblock(self, locator):
+ return locator in self._bufferblocks
+
+ @synchronized
+ def stop_threads(self):
+ """Shut down and wait for background upload and download threads to finish."""
+
+ if self._put_threads is not None:
+ for t in self._put_threads:
+ self._put_queue.put(None)
+ for t in self._put_threads:
+ t.join()
+ self._put_threads = None
+ self._put_queue = None
+ self._put_errors = None
+
+ if self._prefetch_threads is not None:
+ for t in self._prefetch_threads:
+ self._prefetch_queue.put(None)
+ for t in self._prefetch_threads:
+ t.join()
+ self._prefetch_threads = None
+ self._prefetch_queue = None
+
+ def commit_bufferblock(self, block):
+ """Initiate a background upload of a bufferblock.
+
+ This will block if the upload queue is at capacity, otherwise it will
+ return immediately.
+
+ """
+
+ def commit_bufferblock_worker(self):
+ """Background uploader thread."""
+
+ while True:
+ try:
+ bufferblock = self._put_queue.get()
+ if bufferblock is None:
+ return
+ loc = self._keep.put(bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes())
+ bufferblock.set_state(_BufferBlock.COMMITTED, loc)
+
+ except Exception as e:
+ self._put_errors.put((bufferblock.locator(), e))
+ finally:
+ if self._put_queue is not None:
+ self._put_queue.task_done()
+
+ with self.lock:
+ if self._put_threads is None:
+ # Start uploader threads.
+
+ # If we don't limit the Queue size, the upload queue can quickly
+ # grow to take up gigabytes of RAM if the writing process is
+            # generating data more quickly than it can be sent to the Keep
+ # servers.
+ #
+ # With two upload threads and a queue size of 2, this means up to 4
+ # blocks pending. If they are full 64 MiB blocks, that means up to
+ # 256 MiB of internal buffering, which is the same size as the
+ # default download block cache in KeepClient.
+ self._put_queue = Queue.Queue(maxsize=2)
+ self._put_errors = Queue.Queue()
+
+ self._put_threads = []
+ for i in xrange(0, self.num_put_threads):
+ thread = threading.Thread(target=commit_bufferblock_worker, args=(self,))
+ self._put_threads.append(thread)
+ thread.daemon = True
+ thread.start()
+
+            # Mark the block as PENDING to disallow any more appends.
+ block.set_state(_BufferBlock.PENDING)
+ self._put_queue.put(block)
+
+ @synchronized
+ def get_bufferblock(self, locator):
+ return self._bufferblocks.get(locator)
+
+ def get_block_contents(self, locator, num_retries, cache_only=False):
+ """Fetch a block.
+
+ First checks to see if the locator is a BufferBlock and return that, if
+ not, passes the request through to KeepClient.get().
+
+ """
+ with self.lock:
+ if locator in self._bufferblocks:
+ bufferblock = self._bufferblocks[locator]
+ if bufferblock.state() != _BufferBlock.COMMITTED:
+ return bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes()
+ else:
+ locator = bufferblock._locator
+ if cache_only:
+ return self._keep.get_from_cache(locator)
+ else:
+ return self._keep.get(locator, num_retries=num_retries)
+
+ def commit_all(self):
+ """Commit all outstanding buffer blocks.
+
+ Unlike commit_bufferblock(), this is a synchronous call, and will not
+ return until all buffer blocks are uploaded. Raises
+ KeepWriteError() if any blocks failed to upload.
+
+ """
+ with self.lock:
+ items = self._bufferblocks.items()
+
+ for k,v in items:
+ if v.state() == _BufferBlock.WRITABLE:
+ self.commit_bufferblock(v)
+
+ with self.lock:
+ if self._put_queue is not None:
+ self._put_queue.join()
+
+ if not self._put_errors.empty():
+ err = []
+ try:
+ while True:
+ err.append(self._put_errors.get(False))
+ except Queue.Empty:
+ pass
+ raise KeepWriteError("Error writing some blocks", err)
+
+ def block_prefetch(self, locator):
+ """Initiate a background download of a block.
+
+ This assumes that the underlying KeepClient implements a block cache,
+ so repeated requests for the same block will not result in repeated
+ downloads (unless the block is evicted from the cache.) This method
+ does not block.
+
+ """
+
+ if not self.prefetch_enabled:
+ return
+
+ def block_prefetch_worker(self):
+ """The background downloader thread."""
+ while True:
+ try:
+ b = self._prefetch_queue.get()
+ if b is None:
+ return
+ self._keep.get(b)
+ except Exception:
+ pass
+
+ with self.lock:
+ if locator in self._bufferblocks:
+ return
+ if self._prefetch_threads is None:
+ self._prefetch_queue = Queue.Queue()
+ self._prefetch_threads = []
+ for i in xrange(0, self.num_get_threads):
+ thread = threading.Thread(target=block_prefetch_worker, args=(self,))
+ self._prefetch_threads.append(thread)
+ thread.daemon = True
+ thread.start()
+ self._prefetch_queue.put(locator)
+
+
+class ArvadosFile(object):
+ """Represent a file in a Collection.
+
+ ArvadosFile manages the underlying representation of a file in Keep as a
+ sequence of segments spanning a set of blocks, and implements random
+ read/write access.
+
+ This object may be accessed from multiple threads.
+
+ """
+
+ def __init__(self, parent, stream=[], segments=[]):
+ """
+ ArvadosFile constructor.
+
+ :stream:
+ a list of Range objects representing a block stream
+
+ :segments:
+ a list of Range objects representing segments
+ """
+ self.parent = parent
+ self._modified = True
+ self._segments = []
+ self.lock = parent.root_collection().lock
+ for s in segments:
+ self._add_segment(stream, s.locator, s.range_size)
+ self._current_bblock = None
+
+ def writable(self):
+ return self.parent.writable()
+
+ @synchronized
+ def segments(self):
+ return copy.copy(self._segments)
+
+ @synchronized
+ def clone(self, new_parent):
+ """Make a copy of this file."""
+ cp = ArvadosFile(new_parent)
+ cp.replace_contents(self)
+ return cp
+
+ @must_be_writable
+ @synchronized
+ def replace_contents(self, other):
+ """Replace segments of this file with segments from another `ArvadosFile` object."""
+
+ map_loc = {}
+ self._segments = []
+ for other_segment in other.segments():
+ new_loc = other_segment.locator
+ if other.parent._my_block_manager().is_bufferblock(other_segment.locator):
+ if other_segment.locator not in map_loc:
+ bufferblock = other.parent._my_block_manager().get_bufferblock(other_segment.locator)
+ if bufferblock.state() != _BufferBlock.WRITABLE:
+ map_loc[other_segment.locator] = bufferblock.locator()
+ else:
+ map_loc[other_segment.locator] = self.parent._my_block_manager().dup_block(bufferblock, self).blockid
+ new_loc = map_loc[other_segment.locator]
+
+ self._segments.append(Range(new_loc, other_segment.range_start, other_segment.range_size, other_segment.segment_offset))
+
+ self._modified = True
+
+ def __eq__(self, other):
+ if other is self:
+ return True
+ if not isinstance(other, ArvadosFile):
+ return False
+
+ othersegs = other.segments()
+ with self.lock:
+ if len(self._segments) != len(othersegs):
+ return False
+ for i in xrange(0, len(othersegs)):
+ seg1 = self._segments[i]
+ seg2 = othersegs[i]
+ loc1 = seg1.locator
+ loc2 = seg2.locator
+
+ if self.parent._my_block_manager().is_bufferblock(loc1):
+ loc1 = self.parent._my_block_manager().get_bufferblock(loc1).locator()
+
+ if other.parent._my_block_manager().is_bufferblock(loc2):
+ loc2 = other.parent._my_block_manager().get_bufferblock(loc2).locator()
+
+ if (KeepLocator(loc1).stripped() != KeepLocator(loc2).stripped() or
+ seg1.range_start != seg2.range_start or
+ seg1.range_size != seg2.range_size or
+ seg1.segment_offset != seg2.segment_offset):
+ return False
+
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ @synchronized
+ def set_unmodified(self):
+ """Clear the modified flag"""
+ self._modified = False
+
+ @synchronized
+ def modified(self):
+ """Test the modified flag"""
+ return self._modified
+
+ @must_be_writable
+ @synchronized
+ def truncate(self, size):
+ """Shrink the size of the file.
+
+ If `size` is less than the size of the file, the file contents after
+ `size` will be discarded. If `size` is greater than the current size
+ of the file, an IOError will be raised.
+
+ """
+ if size < self.size():
+ new_segs = []
+ for r in self._segments:
+ range_end = r.range_start+r.range_size
+ if r.range_start >= size:
+                    # segment is past the truncate size, all done
+ break
+ elif size < range_end:
+ nr = Range(r.locator, r.range_start, size - r.range_start)
+ nr.segment_offset = r.segment_offset
+ new_segs.append(nr)
+ break
+ else:
+ new_segs.append(r)
+
+ self._segments = new_segs
+ self._modified = True
+ elif size > self.size():
+ raise IOError("truncate() does not support extending the file size")
+
+ def readfrom(self, offset, size, num_retries):
+        """Read up to `size` bytes from the file starting at `offset`."""
+
+ with self.lock:
+ if size == 0 or offset >= self.size():
+ return ''
+ prefetch = locators_and_ranges(self._segments, offset, size + config.KEEP_BLOCK_SIZE)
+ readsegs = locators_and_ranges(self._segments, offset, size)
+
+ for lr in prefetch:
+ self.parent._my_block_manager().block_prefetch(lr.locator)
+
+ data = []
+ for lr in readsegs:
+ block = self.parent._my_block_manager().get_block_contents(lr.locator, num_retries=num_retries, cache_only=bool(data))
+ if block:
+ data.append(block[lr.segment_offset:lr.segment_offset+lr.segment_size])
+ else:
+ break
+ return ''.join(data)
+
+ def _repack_writes(self):
+ """Test if the buffer block has more data than actual segments.
+
+ This happens when a buffered write over-writes a file range written in
+ a previous buffered write. Re-pack the buffer block for efficiency
+ and to avoid leaking information.
+
+ """
+ segs = self._segments
+
+ # Sum up the segments to get the total bytes of the file referencing
+ # into the buffer block.
+ bufferblock_segs = [s for s in segs if s.locator == self._current_bblock.blockid]
+ write_total = sum([s.range_size for s in bufferblock_segs])
+
+ if write_total < self._current_bblock.size():
+ # There is more data in the buffer block than is actually accounted for by segments, so
+ # re-pack into a new buffer by copying over to a new buffer block.
+ new_bb = self.parent._my_block_manager().alloc_bufferblock(self._current_bblock.blockid, starting_capacity=write_total, owner=self)
+ for t in bufferblock_segs:
+ new_bb.append(self._current_bblock.buffer_view[t.segment_offset:t.segment_offset+t.range_size].tobytes())
+ t.segment_offset = new_bb.size() - t.range_size
+
+ self._current_bblock = new_bb
+
+ @must_be_writable
+ @synchronized
+ def writeto(self, offset, data, num_retries):
+ """Write `data` to the file starting at `offset`.
+
+ This will update existing bytes and/or extend the size of the file as
+ necessary.
+
+ """
+ if len(data) == 0:
+ return
+
+ if offset > self.size():
+ raise ArgumentError("Offset is past the end of the file")
+
+ if len(data) > config.KEEP_BLOCK_SIZE:
+ raise ArgumentError("Please append data in chunks smaller than %i bytes (config.KEEP_BLOCK_SIZE)" % (config.KEEP_BLOCK_SIZE))
+
+ self._modified = True
+
+ if self._current_bblock is None or self._current_bblock.state() != _BufferBlock.WRITABLE:
+ self._current_bblock = self.parent._my_block_manager().alloc_bufferblock(owner=self)
+
+ if (self._current_bblock.size() + len(data)) > config.KEEP_BLOCK_SIZE:
+ self._repack_writes()
+ if (self._current_bblock.size() + len(data)) > config.KEEP_BLOCK_SIZE:
+ self.parent._my_block_manager().commit_bufferblock(self._current_bblock)
+ self._current_bblock = self.parent._my_block_manager().alloc_bufferblock(owner=self)
+
+ self._current_bblock.append(data)
+
+ replace_range(self._segments, offset, len(data), self._current_bblock.blockid, self._current_bblock.write_pointer - len(data))
+
+ @must_be_writable
+ @synchronized
+ def add_segment(self, blocks, pos, size):
+ """Add a segment to the end of the file.
+
+        `pos` and `size` reference a section of the stream described by
+ `blocks` (a list of Range objects)
+
+ """
+ self._add_segment(blocks, pos, size)
+
+ def _add_segment(self, blocks, pos, size):
+ """Internal implementation of add_segment."""
+ self._modified = True
+ for lr in locators_and_ranges(blocks, pos, size):
+ last = self._segments[-1] if self._segments else Range(0, 0, 0)
+ r = Range(lr.locator, last.range_start+last.range_size, lr.segment_size, lr.segment_offset)
+ self._segments.append(r)
+
+ @synchronized
+ def size(self):
+ """Get the file size."""
+ if self._segments:
+ n = self._segments[-1]
+ return n.range_start + n.range_size
+ else:
+ return 0
+
+
+ @synchronized
+    def manifest_text(self, stream_name=".", portable_locators=False, normalize=False):
+        buf = ""
+        filestream = []
+        for segment in self._segments:
+            loc = segment.locator
+            if self.parent._my_block_manager().is_bufferblock(loc):
+                loc = self.parent._my_block_manager().get_bufferblock(loc).locator()
+            if portable_locators:
+                loc = KeepLocator(loc).stripped()
+            filestream.append(LocatorAndRange(loc, KeepLocator(loc).size,
+                                              segment.segment_offset, segment.range_size))
+        buf += ' '.join(normalize_stream(stream_name, {stream_name: filestream}))
+        buf += "\n"
+        return buf
+
+
+class ArvadosFileReader(ArvadosFileReaderBase):
+ """Wraps ArvadosFile in a file-like object supporting reading only.
+
+ Be aware that this class is NOT thread safe as there is no locking around
+ updating file pointer.
+
+ """
+
+ def __init__(self, arvadosfile, name, mode="r", num_retries=None):
+ super(ArvadosFileReader, self).__init__(name, mode, num_retries=num_retries)
+ self.arvadosfile = arvadosfile
+
+ def size(self):
+ return self.arvadosfile.size()
+
+ def stream_name(self):
+ return self.arvadosfile.parent.stream_name()
+
+ @_FileLikeObjectBase._before_close
+ @retry_method
+ def read(self, size, num_retries=None):
+ """Read up to `size` bytes from the stream, starting at the current file position."""
+ data = self.arvadosfile.readfrom(self._filepos, size, num_retries)
+ self._filepos += len(data)
+ return data
+
+ @_FileLikeObjectBase._before_close
+ @retry_method
+ def readfrom(self, offset, size, num_retries=None):
+        """Read up to `size` bytes from the stream, starting at `offset`."""
+ return self.arvadosfile.readfrom(offset, size, num_retries)
+
+ def flush(self):
+ pass
+
+
+class ArvadosFileWriter(ArvadosFileReader):
+ """Wraps ArvadosFile in a file-like object supporting both reading and writing.
+
+ Be aware that this class is NOT thread safe as there is no locking around
+ updating file pointer.
+
+ """
+
+ def __init__(self, arvadosfile, name, mode, num_retries=None):
+ super(ArvadosFileWriter, self).__init__(arvadosfile, name, mode, num_retries=num_retries)
+
+ @_FileLikeObjectBase._before_close
+ @retry_method
+ def write(self, data, num_retries=None):
+ if self.mode[0] == "a":
+ self.arvadosfile.writeto(self.size(), data, num_retries)
+ else:
+ self.arvadosfile.writeto(self._filepos, data, num_retries)
+ self._filepos += len(data)
+
+ @_FileLikeObjectBase._before_close
+ @retry_method
+ def writelines(self, seq, num_retries=None):
+ for s in seq:
+ self.write(s, num_retries)
+
+ def truncate(self, size=None):
+ if size is None:
+ size = self._filepos
+ self.arvadosfile.truncate(size)
+ if self._filepos > self.size():
+ self._filepos = self.size()
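+
+# Usage sketch (illustrative; 'arvfile' is assumed to be an ArvadosFile): in
+# append mode every write lands at the current end of the file.
+#
+#   writer = ArvadosFileWriter(arvfile, "log.txt", "a")
+#   writer.write("first\n")
+#   writer.write("second\n")   # appended after "first\n"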
import logging
import os
import re
+import errno
+import hashlib
+import time
+import threading
from collections import deque
from stat import *
-from .arvfile import ArvadosFileBase
-from keep import *
-from .stream import StreamReader, split
+from .arvfile import split, _FileLikeObjectBase, ArvadosFile, ArvadosFileWriter, ArvadosFileReader, _BlockManager, synchronized, must_be_writable, NoopLock
+from keep import KeepLocator, KeepClient
+from .stream import StreamReader
+from ._normalize_stream import normalize_stream
+from ._ranges import Range, LocatorAndRange
+from .safeapi import ThreadSafeApiCache
import config
import errors
import util
+import events
+from arvados.retry import retry_method
_logger = logging.getLogger('arvados.collection')
-def normalize_stream(s, stream):
- stream_tokens = [s]
- sortedfiles = list(stream.keys())
- sortedfiles.sort()
-
- blocks = {}
- streamoffset = 0L
- for f in sortedfiles:
- for b in stream[f]:
- if b[arvados.LOCATOR] not in blocks:
- stream_tokens.append(b[arvados.LOCATOR])
- blocks[b[arvados.LOCATOR]] = streamoffset
- streamoffset += b[arvados.BLOCKSIZE]
-
- if len(stream_tokens) == 1:
- stream_tokens.append(config.EMPTY_BLOCK_LOCATOR)
-
- for f in sortedfiles:
- current_span = None
- fout = f.replace(' ', '\\040')
- for segment in stream[f]:
- segmentoffset = blocks[segment[arvados.LOCATOR]] + segment[arvados.OFFSET]
- if current_span is None:
- current_span = [segmentoffset, segmentoffset + segment[arvados.SEGMENTSIZE]]
- else:
- if segmentoffset == current_span[1]:
- current_span[1] += segment[arvados.SEGMENTSIZE]
- else:
- stream_tokens.append("{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
- current_span = [segmentoffset, segmentoffset + segment[arvados.SEGMENTSIZE]]
-
- if current_span is not None:
- stream_tokens.append("{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
-
- if not stream[f]:
- stream_tokens.append("0:0:{0}".format(fout))
-
- return stream_tokens
-
-
class CollectionBase(object):
def __enter__(self):
return self
return self._keep_client
def stripped_manifest(self):
- """
+ """Get the manifest with locator hints stripped.
+
Return the manifest for the current collection with all
non-portable hints (i.e., permission signatures and other
hints other than size hints) removed from the locators.
return ''.join(clean)
-class CollectionReader(CollectionBase):
- def __init__(self, manifest_locator_or_text, api_client=None,
- keep_client=None, num_retries=0):
- """Instantiate a CollectionReader.
-
- This class parses Collection manifests to provide a simple interface
- to read its underlying files.
-
- Arguments:
- * manifest_locator_or_text: One of a Collection UUID, portable data
- hash, or full manifest text.
- * api_client: The API client to use to look up Collections. If not
- provided, CollectionReader will build one from available Arvados
- configuration.
- * keep_client: The KeepClient to use to download Collection data.
- If not provided, CollectionReader will build one from available
- Arvados configuration.
- * num_retries: The default number of times to retry failed
- service requests. Default 0. You may change this value
- after instantiation, but note those changes may not
- propagate to related objects like the Keep client.
- """
- self._api_client = api_client
- self._keep_client = keep_client
- self.num_retries = num_retries
- if re.match(util.keep_locator_pattern, manifest_locator_or_text):
- self._manifest_locator = manifest_locator_or_text
- self._manifest_text = None
- elif re.match(util.collection_uuid_pattern, manifest_locator_or_text):
- self._manifest_locator = manifest_locator_or_text
- self._manifest_text = None
- elif re.match(util.manifest_pattern, manifest_locator_or_text):
- self._manifest_text = manifest_locator_or_text
- self._manifest_locator = None
- else:
- raise errors.ArgumentError(
- "Argument to CollectionReader must be a manifest or a collection UUID")
- self._api_response = None
- self._streams = None
-
- def _populate_from_api_server(self):
- # As in KeepClient itself, we must wait until the last
- # possible moment to instantiate an API client, in order to
- # avoid tripping up clients that don't have access to an API
- # server. If we do build one, make sure our Keep client uses
- # it. If instantiation fails, we'll fall back to the except
- # clause, just like any other Collection lookup
- # failure. Return an exception, or None if successful.
- try:
- if self._api_client is None:
- self._api_client = arvados.api('v1')
- self._keep_client = None # Make a new one with the new api.
- self._api_response = self._api_client.collections().get(
- uuid=self._manifest_locator).execute(
- num_retries=self.num_retries)
- self._manifest_text = self._api_response['manifest_text']
- return None
- except Exception as e:
- return e
-
- def _populate_from_keep(self):
- # Retrieve a manifest directly from Keep. This has a chance of
- # working if [a] the locator includes a permission signature
- # or [b] the Keep services are operating in world-readable
- # mode. Return an exception, or None if successful.
- try:
- self._manifest_text = self._my_keep().get(
- self._manifest_locator, num_retries=self.num_retries)
- except Exception as e:
- return e
-
- def _populate(self):
- error_via_api = None
- error_via_keep = None
- should_try_keep = ((self._manifest_text is None) and
- util.keep_locator_pattern.match(
- self._manifest_locator))
- if ((self._manifest_text is None) and
- util.signed_locator_pattern.match(self._manifest_locator)):
- error_via_keep = self._populate_from_keep()
- if self._manifest_text is None:
- error_via_api = self._populate_from_api_server()
- if error_via_api is not None and not should_try_keep:
- raise error_via_api
- if ((self._manifest_text is None) and
- not error_via_keep and
- should_try_keep):
- # Looks like a keep locator, and we didn't already try keep above
- error_via_keep = self._populate_from_keep()
- if self._manifest_text is None:
- # Nothing worked!
- raise arvados.errors.NotFoundError(
- ("Failed to retrieve collection '{}' " +
- "from either API server ({}) or Keep ({})."
- ).format(
- self._manifest_locator,
- error_via_api,
- error_via_keep))
- self._streams = [sline.split()
- for sline in self._manifest_text.split("\n")
- if sline]
-
- def _populate_first(orig_func):
- # Decorator for methods that read actual Collection data.
- @functools.wraps(orig_func)
- def wrapper(self, *args, **kwargs):
- if self._streams is None:
- self._populate()
- return orig_func(self, *args, **kwargs)
- return wrapper
-
- @_populate_first
- def api_response(self):
- """api_response() -> dict or None
-
- Returns information about this Collection fetched from the API server.
- If the Collection exists in Keep but not the API server, currently
- returns None. Future versions may provide a synthetic response.
- """
- return self._api_response
-
- @_populate_first
- def normalize(self):
- # Rearrange streams
- streams = {}
- for s in self.all_streams():
- for f in s.all_files():
- streamname, filename = split(s.name() + "/" + f.name())
- if streamname not in streams:
- streams[streamname] = {}
- if filename not in streams[streamname]:
- streams[streamname][filename] = []
- for r in f.segments:
- streams[streamname][filename].extend(s.locators_and_ranges(r[0], r[1]))
-
- self._streams = [normalize_stream(s, streams[s])
- for s in sorted(streams)]
-
- # Regenerate the manifest text based on the normalized streams
- self._manifest_text = ''.join(
- [StreamReader(stream, keep=self._my_keep()).manifest_text()
- for stream in self._streams])
-
- @_populate_first
- def open(self, streampath, filename=None):
- """open(streampath[, filename]) -> file-like object
-
- Pass in the path of a file to read from the Collection, either as a
- single string or as two separate stream name and file name arguments.
- This method returns a file-like object to read that file.
- """
- if filename is None:
- streampath, filename = split(streampath)
- keep_client = self._my_keep()
- for stream_s in self._streams:
- stream = StreamReader(stream_s, keep_client,
- num_retries=self.num_retries)
- if stream.name() == streampath:
- break
- else:
- raise ValueError("stream '{}' not found in Collection".
- format(streampath))
- try:
- return stream.files()[filename]
- except KeyError:
- raise ValueError("file '{}' not found in Collection stream '{}'".
- format(filename, streampath))
-
- @_populate_first
- def all_streams(self):
- return [StreamReader(s, self._my_keep(), num_retries=self.num_retries)
- for s in self._streams]
-
- def all_files(self):
- for s in self.all_streams():
- for f in s.all_files():
- yield f
-
- @_populate_first
- def manifest_text(self, strip=False, normalize=False):
- if normalize:
- cr = CollectionReader(self.manifest_text())
- cr.normalize()
- return cr.manifest_text(strip=strip, normalize=False)
- elif strip:
- return self.stripped_manifest()
- else:
- return self._manifest_text
-
-
-class _WriterFile(ArvadosFileBase):
+class _WriterFile(_FileLikeObjectBase):
def __init__(self, coll_writer, name):
super(_WriterFile, self).__init__(name, 'wb')
self.dest = coll_writer
super(_WriterFile, self).close()
self.dest.finish_current_file()
- @ArvadosFileBase._before_close
+ @_FileLikeObjectBase._before_close
def write(self, data):
self.dest.write(data)
- @ArvadosFileBase._before_close
+ @_FileLikeObjectBase._before_close
def writelines(self, seq):
for data in seq:
self.write(data)
- @ArvadosFileBase._before_close
+ @_FileLikeObjectBase._before_close
def flush(self):
self.dest.flush_data()
class CollectionWriter(CollectionBase):
- KEEP_BLOCK_SIZE = 2**26
-
def __init__(self, api_client=None, num_retries=0, replication=None):
"""Instantiate a CollectionWriter.
def _work_file(self):
while True:
- buf = self._queued_file.read(self.KEEP_BLOCK_SIZE)
+ buf = self._queued_file.read(config.KEEP_BLOCK_SIZE)
if not buf:
break
self.write(buf)
self._data_buffer.append(newdata)
self._data_buffer_len += len(newdata)
self._current_stream_length += len(newdata)
- while self._data_buffer_len >= self.KEEP_BLOCK_SIZE:
+ while self._data_buffer_len >= config.KEEP_BLOCK_SIZE:
self.flush_data()
def open(self, streampath, filename=None):
if data_buffer:
self._current_stream_locators.append(
self._my_keep().put(
- data_buffer[0:self.KEEP_BLOCK_SIZE],
+ data_buffer[0:config.KEEP_BLOCK_SIZE],
copies=self.replication))
- self._data_buffer = [data_buffer[self.KEEP_BLOCK_SIZE:]]
+ self._data_buffer = [data_buffer[config.KEEP_BLOCK_SIZE:]]
self._data_buffer_len = len(self._data_buffer[0])
def start_new_file(self, newfilename=None):
raise errors.AssertionError(
"resumable writer can't accept unsourced data")
return super(ResumableCollectionWriter, self).write(data)
+
+ADD = "add"
+DEL = "del"
+MOD = "mod"
+FILE = "file"
+COLLECTION = "collection"
+
+class SynchronizedCollectionBase(CollectionBase):
+ """Base class for Collections and Subcollections.
+
+ Implements the majority of functionality relating to accessing items in the
+ Collection.
+
+ """
+
+ def __init__(self, parent=None):
+ self.parent = parent
+ self._modified = True
+ self._items = {}
+
+ def _my_api(self):
+ raise NotImplementedError()
+
+ def _my_keep(self):
+ raise NotImplementedError()
+
+ def _my_block_manager(self):
+ raise NotImplementedError()
+
+ def writable(self):
+ raise NotImplementedError()
+
+ def root_collection(self):
+ raise NotImplementedError()
+
+ def notify(self, event, collection, name, item):
+ raise NotImplementedError()
+
+ def stream_name(self):
+ raise NotImplementedError()
+
+ @must_be_writable
+ @synchronized
+ def find_or_create(self, path, create_type):
+ """Recursively search the specified file path.
+
+ May return either a `Collection` or `ArvadosFile`. If not found, will
+ create a new item at the specified path based on `create_type`. Will
+ create intermediate subcollections needed to contain the final item in
+ the path.
+
+ :create_type:
+ One of `arvados.collection.FILE` or
+ `arvados.collection.COLLECTION`. If the path is not found, and value
+ of create_type is FILE then create and return a new ArvadosFile for
+ the last path component. If COLLECTION, then create and return a new
+ Collection for the last path component.
+
+ """
+
+ pathcomponents = path.split("/", 1)
+ if pathcomponents[0]:
+ item = self._items.get(pathcomponents[0])
+ if len(pathcomponents) == 1:
+ if item is None:
+ # create new file
+ if create_type == COLLECTION:
+ item = Subcollection(self)
+ else:
+ item = ArvadosFile(self)
+ self._items[pathcomponents[0]] = item
+ self._modified = True
+ self.notify(ADD, self, pathcomponents[0], item)
+ return item
+ else:
+ if item is None:
+ # create new collection
+ item = Subcollection(self)
+ self._items[pathcomponents[0]] = item
+ self._modified = True
+ self.notify(ADD, self, pathcomponents[0], item)
+ if isinstance(item, SynchronizedCollectionBase):
+ return item.find_or_create(pathcomponents[1], create_type)
+ else:
+                    raise IOError(errno.ENOTDIR, "Interior path components must be subcollection")
+ else:
+ return self
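+
+    # For example (illustrative), on a writable collection 'c':
+    #   c.find_or_create("a/b/c.txt", FILE)
+    # creates subcollections "a" and "a/b" as needed and returns a new
+    # ArvadosFile for "c.txt".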
+
+ @synchronized
+ def find(self, path):
+ """Recursively search the specified file path.
+
+ May return either a Collection or ArvadosFile. Return None if not
+ found.
+
+ """
+ if not path:
+ raise errors.ArgumentError("Parameter 'path' must not be empty.")
+
+ pathcomponents = path.split("/", 1)
+ item = self._items.get(pathcomponents[0])
+ if len(pathcomponents) == 1:
+ return item
+ else:
+ if isinstance(item, SynchronizedCollectionBase):
+ if pathcomponents[1]:
+ return item.find(pathcomponents[1])
+ else:
+ return item
+ else:
+                raise IOError(errno.ENOTDIR, "Interior path components must be subcollection")
+
+    def mkdirs(self, path):
+        """Recursive subcollection create.
+
+        Like `os.makedirs()`. Will create intermediate subcollections needed to
+ contain the leaf subcollection path.
+
+ """
+ return self.find_or_create(path, COLLECTION)
+
+ def open(self, path, mode="r"):
+ """Open a file-like object for access.
+
+ :path:
+ path to a file in the collection
+ :mode:
+ one of "r", "r+", "w", "w+", "a", "a+"
+ :"r":
+ opens for reading
+ :"r+":
+ opens for reading and writing. Reads/writes share a file pointer.
+ :"w", "w+":
+ truncates to 0 and opens for reading and writing. Reads/writes share a file pointer.
+ :"a", "a+":
+ opens for reading and writing. All writes are appended to
+ the end of the file. Writing does not affect the file pointer for
+ reading.
+ """
+ mode = mode.replace("b", "")
+ if len(mode) == 0 or mode[0] not in ("r", "w", "a"):
+ raise errors.ArgumentError("Bad mode '%s'" % mode)
+ create = (mode != "r")
+
+ if create and not self.writable():
+            raise IOError(errno.EROFS, "Collection is read only")
+
+ if create:
+ arvfile = self.find_or_create(path, FILE)
+ else:
+ arvfile = self.find(path)
+
+ if arvfile is None:
+            raise IOError(errno.ENOENT, "File not found")
+        if not isinstance(arvfile, ArvadosFile):
+            raise IOError(errno.EISDIR, "Path must refer to a file.")
+
+ if mode[0] == "w":
+ arvfile.truncate(0)
+
+ name = os.path.basename(path)
+
+ if mode == "r":
+ return ArvadosFileReader(arvfile, name, mode, num_retries=self.num_retries)
+ else:
+ return ArvadosFileWriter(arvfile, name, mode, num_retries=self.num_retries)
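+
+    # Usage sketch (illustrative, assumes a writable collection 'c'):
+    #   with c.open("notes.txt", "w") as f:
+    #       f.write("hello")
+    #   c.open("notes.txt").read(5)   # -> "hello"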
+
+ @synchronized
+ def modified(self):
+ """Test if the collection (or any subcollection or file) has been modified."""
+ if self._modified:
+ return True
+ for k,v in self._items.items():
+ if v.modified():
+ return True
+ return False
+
+ @synchronized
+ def set_unmodified(self):
+ """Recursively clear modified flag."""
+ self._modified = False
+ for k,v in self._items.items():
+ v.set_unmodified()
+
+ @synchronized
+ def __iter__(self):
+ """Iterate over names of files and collections contained in this collection."""
+ return iter(self._items.keys())
+
+ @synchronized
+ def __getitem__(self, k):
+ """Get a file or collection that is directly contained by this collection.
+
+ If you want to search a path, use `find()` instead.
+
+ """
+ return self._items[k]
+
+ @synchronized
+ def __contains__(self, k):
+        """Test if there is a file or collection directly contained by this collection."""
+ return k in self._items
+
+ @synchronized
+ def __len__(self):
+ """Get the number of items directly contained in this collection."""
+ return len(self._items)
+
+ @must_be_writable
+ @synchronized
+ def __delitem__(self, p):
+ """Delete an item by name which is directly contained by this collection."""
+ del self._items[p]
+ self._modified = True
+ self.notify(DEL, self, p, None)
+
+ @synchronized
+ def keys(self):
+ """Get a list of names of files and collections directly contained in this collection."""
+ return self._items.keys()
+
+ @synchronized
+ def values(self):
+ """Get a list of files and collection objects directly contained in this collection."""
+ return self._items.values()
+
+ @synchronized
+ def items(self):
+ """Get a list of (name, object) tuples directly contained in this collection."""
+ return self._items.items()
+
+ def exists(self, path):
+ """Test if there is a file or collection at `path`."""
+        return self.find(path) is not None
+
+ @must_be_writable
+ @synchronized
+ def remove(self, path, recursive=False):
+ """Remove the file or subcollection (directory) at `path`.
+
+ :recursive:
+ Specify whether to remove non-empty subcollections (True), or raise an error (False).
+ """
+
+ if not path:
+ raise errors.ArgumentError("Parameter 'path' must not be empty.")
+
+ pathcomponents = path.split("/", 1)
+ item = self._items.get(pathcomponents[0])
+ if item is None:
+            raise IOError(errno.ENOENT, "File not found")
+        if len(pathcomponents) == 1:
+            if isinstance(item, SynchronizedCollectionBase) and len(item) > 0 and not recursive:
+                raise IOError(errno.ENOTEMPTY, "Subcollection not empty")
+            deleteditem = item
+            del self._items[pathcomponents[0]]
+ self._modified = True
+ self.notify(DEL, self, pathcomponents[0], deleteditem)
+ else:
+ item.remove(pathcomponents[1])
+
+ def _clonefrom(self, source):
+ for k,v in source.items():
+ self._items[k] = v.clone(self)
+
+ def clone(self):
+ raise NotImplementedError()
+
+ @must_be_writable
+ @synchronized
+ def copy(self, source, target_path, source_collection=None, overwrite=False):
+ """Copy a file or subcollection to a new path in this collection.
+
+ :source:
+ An ArvadosFile, Subcollection, or string with a path to source file or subcollection
+
+ :target_path:
+ Destination file or path. If the target path already exists and is a
+ subcollection, the item will be placed inside the subcollection. If
+ the target path already exists and is a file, this will raise an error
+ unless you specify `overwrite=True`.
+
+ :source_collection:
+ Collection to copy `source_path` from (default `self`)
+
+ :overwrite:
+ Whether to overwrite target file if it already exists.
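+
+    Example (illustrative; assumes `c` is a writable Collection containing
+    "logs/run1.txt"):
+
+      # Copies "logs/run1.txt" into subcollection "archive", creating
+      # "archive" if needed; the file keeps its name "run1.txt".
+      c.copy("logs/run1.txt", "archive/")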
+ """
+ if source_collection is None:
+ source_collection = self
+
+ # Find the object to copy
+ if isinstance(source, basestring):
+ source_obj = source_collection.find(source)
+ if source_obj is None:
+                raise IOError(errno.ENOENT, "File not found")
+ sourcecomponents = source.split("/")
+ else:
+ source_obj = source
+ sourcecomponents = None
+
+        # Find the parent collection of the target path.
+ targetcomponents = target_path.split("/")
+
+ # Determine the name to use.
+ target_name = targetcomponents[-1] if targetcomponents[-1] else (sourcecomponents[-1] if sourcecomponents else None)
+
+ if not target_name:
+ raise errors.ArgumentError("Target path is empty and source is an object. Cannot determine destination filename to use.")
+
+ target_dir = self.find_or_create("/".join(targetcomponents[0:-1]), COLLECTION)
+
+ if target_name in target_dir:
+ if isinstance(target_dir[target_name], SynchronizedCollectionBase) and sourcecomponents:
+ target_dir = target_dir[target_name]
+ target_name = sourcecomponents[-1]
+ elif not overwrite:
+                raise IOError(errno.EEXIST, "File already exists")
+
+ modified_from = None
+ if target_name in target_dir:
+ modified_from = target_dir[target_name]
+
+ # Actually make the copy.
+ dup = source_obj.clone(target_dir)
+ target_dir._items[target_name] = dup
+ target_dir._modified = True
+
+ if modified_from:
+ self.notify(MOD, target_dir, target_name, (modified_from, dup))
+ else:
+ self.notify(ADD, target_dir, target_name, dup)
+
+ @synchronized
+ def manifest_text(self, stream_name=".", strip=False, normalize=False):
+        """Get the manifest text for this collection, its subcollections, and files.
+
+ :stream_name:
+ Name of the stream (directory)
+
+ :strip:
+ If True, remove signing tokens from block locators if present.
+ If False (default), block locators are left unchanged.
+
+ :normalize:
+ If True, always export the manifest text in normalized form
+ even if the Collection is not modified. If False (default) and the collection
+ is not modified, return the original manifest text even if it is not
+ in normalized form.
+
+ """
+
+ if self.modified() or self._manifest_text is None or normalize:
+ item = self
+ stream = {}
+ buf = []
+ sorted_keys = sorted(item.keys())
+ for filename in [s for s in sorted_keys if isinstance(item[s], ArvadosFile)]:
+                # Build the segment list for this file
+ arvfile = item[filename]
+ filestream = []
+ for segment in arvfile.segments():
+ loc = segment.locator
+ if arvfile.parent._my_block_manager().is_bufferblock(loc):
+ loc = arvfile.parent._my_block_manager().get_bufferblock(loc).locator()
+ if strip:
+ loc = KeepLocator(loc).stripped()
+ filestream.append(LocatorAndRange(loc, KeepLocator(loc).size,
+ segment.segment_offset, segment.range_size))
+ stream[filename] = filestream
+ if stream:
+ buf.append(" ".join(normalize_stream(stream_name, stream)) + "\n")
+ for dirname in [s for s in sorted_keys if isinstance(item[s], SynchronizedCollectionBase)]:
+ buf.append(item[dirname].manifest_text(stream_name=os.path.join(stream_name, dirname), strip=strip))
+ return "".join(buf)
+ else:
+ if strip:
+ return self.stripped_manifest()
+ else:
+ return self._manifest_text
+
+ @synchronized
+ def diff(self, end_collection, prefix=".", holding_collection=None):
+ """Generate list of add/modify/delete actions.
+
+        When given to `apply`, will change `self` to match `end_collection`.
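+
+        Example (illustrative):
+
+          changes = c1.diff(c2)   # actions that would make c1 match c2
+          c1.apply(changes)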
+
+ """
+ changes = []
+ if holding_collection is None:
+ holding_collection = Collection(api_client=self._my_api(), keep_client=self._my_keep())
+ for k in self:
+ if k not in end_collection:
+ changes.append((DEL, os.path.join(prefix, k), self[k].clone(holding_collection)))
+ for k in end_collection:
+ if k in self:
+ if isinstance(end_collection[k], Subcollection) and isinstance(self[k], Subcollection):
+ changes.extend(self[k].diff(end_collection[k], os.path.join(prefix, k), holding_collection))
+ elif end_collection[k] != self[k]:
+ changes.append((MOD, os.path.join(prefix, k), self[k].clone(holding_collection), end_collection[k].clone(holding_collection)))
+ else:
+ changes.append((ADD, os.path.join(prefix, k), end_collection[k].clone(holding_collection)))
+ return changes
+
+ @must_be_writable
+ @synchronized
+ def apply(self, changes):
+ """Apply changes from `diff`.
+
+ If a change conflicts with a local change, it will be saved to an
+ alternate path indicating the conflict.
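+
+        For example, a conflicting change to "foo.txt" is saved to a path of
+        the form "foo.txt~conflict-<timestamp>~" (see the format string
+        below).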
+
+ """
+ for change in changes:
+ event_type = change[0]
+ path = change[1]
+ initial = change[2]
+ local = self.find(path)
+ conflictpath = "%s~conflict-%s~" % (path, time.strftime("%Y-%m-%d-%H:%M:%S",
+ time.gmtime()))
+ if event_type == ADD:
+ if local is None:
+ # No local file at path, safe to copy over new file
+ self.copy(initial, path)
+            elif local != initial:
+ # There is already local file and it is different:
+ # save change to conflict file.
+ self.copy(initial, conflictpath)
+ elif event_type == MOD:
+ final = change[3]
+ if local == initial:
+ # Local matches the "initial" item so it has not
+ # changed locally and is safe to update.
+ if isinstance(local, ArvadosFile) and isinstance(final, ArvadosFile):
+ # Replace contents of local file with new contents
+ local.replace_contents(final)
+ else:
+ # Overwrite path with new item; this can happen if
+ # path was a file and is now a collection or vice versa
+ self.copy(final, path, overwrite=True)
+ else:
+                # Local is missing (presumably deleted) or local doesn't
+                # match the "initial" value, so save change to conflict file.
+ self.copy(final, conflictpath)
+ elif event_type == DEL:
+ if local == initial:
+ # Local item matches "initial" value, so it is safe to remove.
+ self.remove(path, recursive=True)
+ # else, the file is modified or already removed, in either
+ # case we don't want to try to remove it.
+
+ def portable_data_hash(self):
+ """Get the portable data hash for this collection's manifest."""
+ stripped = self.manifest_text(strip=True)
+ return hashlib.md5(stripped).hexdigest() + '+' + str(len(stripped))
+
+ @synchronized
+ def __eq__(self, other):
+ if other is self:
+ return True
+ if not isinstance(other, SynchronizedCollectionBase):
+ return False
+ if len(self._items) != len(other):
+ return False
+ for k in self._items:
+ if k not in other:
+ return False
+ if self._items[k] != other[k]:
+ return False
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+
+class Collection(SynchronizedCollectionBase):
+ """Represents the root of an Arvados Collection.
+
+ This class is threadsafe. The root collection object, all subcollections
+ and files are protected by a single lock (i.e. each access locks the entire
+ collection).
+
+    Brief summary of useful methods:
+
+ :To read an existing file:
+ `c.open("myfile", "r")`
+
+ :To write a new file:
+ `c.open("myfile", "w")`
+
+ :To determine if a file exists:
+ `c.find("myfile") is not None`
+
+ :To copy a file:
+ `c.copy("source", "dest")`
+
+ :To delete a file:
+ `c.remove("myfile")`
+
+ :To save to an existing collection record:
+ `c.save()`
+
+ :To save a new collection record:
+ `c.save_new()`
+
+ :To merge remote changes into this object:
+ `c.update()`
+
+ Must be associated with an API server Collection record (during
+    initialization, or using `save_new`) to use `save` or `update`.
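+
+    Example (a minimal sketch; assumes a reachable API server and Keep
+    service):
+
+      c = Collection()
+      f = c.open("hello.txt", "w")
+      f.write("hello world")
+      c.save_new(name="example collection")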
+
+ """
+
+ def __init__(self, manifest_locator_or_text=None,
+ api_client=None,
+ keep_client=None,
+ num_retries=None,
+ parent=None,
+ apiconfig=None,
+ block_manager=None):
+ """Collection constructor.
+
+ :manifest_locator_or_text:
+      One of an Arvados collection UUID, a block locator of a manifest,
+      raw manifest text, or None (to create an empty collection).
+ :parent:
+ the parent Collection, may be None.
+ :apiconfig:
+ A dict containing keys for ARVADOS_API_HOST and ARVADOS_API_TOKEN.
+ Prefer this over supplying your own api_client and keep_client (except in testing).
+ Will use default config settings if not specified.
+ :api_client:
+ The API client object to use for requests. If not specified, create one using `apiconfig`.
+ :keep_client:
+ the Keep client to use for requests. If not specified, create one using `apiconfig`.
+ :num_retries:
+ the number of retries for API and Keep requests.
+ :block_manager:
+ the block manager to use. If not specified, create one.
+
+ """
+ super(Collection, self).__init__(parent)
+ self._api_client = api_client
+ self._keep_client = keep_client
+ self._block_manager = block_manager
+
+ if apiconfig:
+ self._config = apiconfig
+ else:
+ self._config = config.settings()
+
+ self.num_retries = num_retries if num_retries is not None else 0
+ self._manifest_locator = None
+ self._manifest_text = None
+ self._api_response = None
+
+ self.lock = threading.RLock()
+ self.callbacks = []
+ self.events = None
+
+ if manifest_locator_or_text:
+ if re.match(util.keep_locator_pattern, manifest_locator_or_text):
+ self._manifest_locator = manifest_locator_or_text
+ elif re.match(util.collection_uuid_pattern, manifest_locator_or_text):
+ self._manifest_locator = manifest_locator_or_text
+ elif re.match(util.manifest_pattern, manifest_locator_or_text):
+ self._manifest_text = manifest_locator_or_text
+ else:
+ raise errors.ArgumentError(
+                    "Argument to Collection constructor must be a manifest or a collection UUID")
+
+ try:
+ self._populate()
+ except (IOError, errors.SyntaxError) as e:
+            raise errors.ArgumentError("Error processing manifest text: %s" % e)
+
+ def root_collection(self):
+ return self
+
+ def stream_name(self):
+ return "."
+
+ def writable(self):
+ return True
+
+ @synchronized
+ @retry_method
+ def update(self, other=None, num_retries=None):
+ """Merge the latest collection on the API server with the current collection."""
+
+ if other is None:
+ if self._manifest_locator is None:
+ raise errors.ArgumentError("`other` is None but collection does not have a manifest_locator uuid")
+ response = self._my_api().collections().get(uuid=self._manifest_locator).execute(num_retries=num_retries)
+ other = CollectionReader(response["manifest_text"])
+ baseline = CollectionReader(self._manifest_text)
+ self.apply(baseline.diff(other))
+
+ @synchronized
+ def _my_api(self):
+ if self._api_client is None:
+ self._api_client = ThreadSafeApiCache(self._config)
+ self._keep_client = self._api_client.keep
+ return self._api_client
+
+ @synchronized
+ def _my_keep(self):
+ if self._keep_client is None:
+ if self._api_client is None:
+ self._my_api()
+ else:
+ self._keep_client = KeepClient(api_client=self._api_client)
+ return self._keep_client
+
+ @synchronized
+ def _my_block_manager(self):
+ if self._block_manager is None:
+ self._block_manager = _BlockManager(self._my_keep())
+ return self._block_manager
+
+ def _populate_from_api_server(self):
+ # As in KeepClient itself, we must wait until the last
+ # possible moment to instantiate an API client, in order to
+ # avoid tripping up clients that don't have access to an API
+ # server. If we do build one, make sure our Keep client uses
+ # it. If instantiation fails, we'll fall back to the except
+ # clause, just like any other Collection lookup
+ # failure. Return an exception, or None if successful.
+ try:
+ self._api_response = self._my_api().collections().get(
+ uuid=self._manifest_locator).execute(
+ num_retries=self.num_retries)
+ self._manifest_text = self._api_response['manifest_text']
+ return None
+ except Exception as e:
+ return e
+
+ def _populate_from_keep(self):
+ # Retrieve a manifest directly from Keep. This has a chance of
+ # working if [a] the locator includes a permission signature
+ # or [b] the Keep services are operating in world-readable
+ # mode. Return an exception, or None if successful.
+ try:
+ self._manifest_text = self._my_keep().get(
+ self._manifest_locator, num_retries=self.num_retries)
+ except Exception as e:
+ return e
+
+ def _populate(self):
+ if self._manifest_locator is None and self._manifest_text is None:
+ return
+ error_via_api = None
+ error_via_keep = None
+ should_try_keep = ((self._manifest_text is None) and
+ util.keep_locator_pattern.match(
+ self._manifest_locator))
+ if ((self._manifest_text is None) and
+ util.signed_locator_pattern.match(self._manifest_locator)):
+ error_via_keep = self._populate_from_keep()
+ if self._manifest_text is None:
+ error_via_api = self._populate_from_api_server()
+ if error_via_api is not None and not should_try_keep:
+ raise error_via_api
+ if ((self._manifest_text is None) and
+ not error_via_keep and
+ should_try_keep):
+ # Looks like a keep locator, and we didn't already try keep above
+ error_via_keep = self._populate_from_keep()
+ if self._manifest_text is None:
+ # Nothing worked!
+ raise errors.NotFoundError(
+ ("Failed to retrieve collection '{}' " +
+ "from either API server ({}) or Keep ({})."
+ ).format(
+ self._manifest_locator,
+ error_via_api,
+ error_via_keep))
+ # populate
+ self._baseline_manifest = self._manifest_text
+ self._import_manifest(self._manifest_text)
+
+
+ def _has_collection_uuid(self):
+ return self._manifest_locator is not None and re.match(util.collection_uuid_pattern, self._manifest_locator)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ """Support scoped auto-commit in a with: block."""
+        if exc_type is None:
+ if self.writable() and self._has_collection_uuid():
+ self.save()
+ if self._block_manager is not None:
+ self._block_manager.stop_threads()
+
+ @synchronized
+ def clone(self, new_parent=None, readonly=False, new_config=None):
+ if new_config is None:
+ new_config = self._config
+ if readonly:
+ newcollection = CollectionReader(parent=new_parent, apiconfig=new_config)
+ else:
+ newcollection = Collection(parent=new_parent, apiconfig=new_config)
+
+ newcollection._clonefrom(self)
+ return newcollection
+
+ @synchronized
+ def api_response(self):
+ """Returns information about this Collection fetched from the API server.
+
+ If the Collection exists in Keep but not the API server, currently
+ returns None. Future versions may provide a synthetic response.
+
+ """
+ return self._api_response
+
+ def find_or_create(self, path, create_type):
+ """See `SynchronizedCollectionBase.find_or_create`"""
+ if path == ".":
+ return self
+ else:
+ return super(Collection, self).find_or_create(path[2:] if path.startswith("./") else path, create_type)
+
+ def find(self, path):
+ """See `SynchronizedCollectionBase.find`"""
+ if path == ".":
+ return self
+ else:
+ return super(Collection, self).find(path[2:] if path.startswith("./") else path)
+
+ def remove(self, path, recursive=False):
+ """See `SynchronizedCollectionBase.remove`"""
+ if path == ".":
+ raise errors.ArgumentError("Cannot remove '.'")
+ else:
+ return super(Collection, self).remove(path[2:] if path.startswith("./") else path, recursive)
+
+ @must_be_writable
+ @synchronized
+ @retry_method
+ def save(self, merge=True, num_retries=None):
+ """Save collection to an existing collection record.
+
+        Commit pending buffer blocks to Keep, merge with remote record (if
+        merge=True), write the manifest to Keep, and update the collection
+ record.
+
+ Will raise AssertionError if not associated with a collection record on
+ the API server. If you want to save a manifest to Keep only, see
+ `save_new()`.
+
+        :merge:
+ Update and merge remote changes before saving. Otherwise, any
+ remote changes will be ignored and overwritten.
+
+ """
+ if self.modified():
+ if not self._has_collection_uuid():
+ raise AssertionError("Collection manifest_locator must be a collection uuid. Use save_new() for new collections.")
+ self._my_block_manager().commit_all()
+ if merge:
+ self.update()
+ self._my_keep().put(self.manifest_text(strip=True), num_retries=num_retries)
+
+ text = self.manifest_text(strip=False)
+ self._api_response = self._my_api().collections().update(
+ uuid=self._manifest_locator,
+ body={'manifest_text': text}
+ ).execute(
+ num_retries=num_retries)
+ self._manifest_text = self._api_response["manifest_text"]
+ self.set_unmodified()
+
+
+ @must_be_writable
+ @synchronized
+ @retry_method
+ def save_new(self, name=None, create_collection_record=True, owner_uuid=None, ensure_unique_name=False, num_retries=None):
+ """Save collection to a new collection record.
+
+ Commit pending buffer blocks to Keep, write the manifest to Keep, and
+        create a new collection record (if create_collection_record is True).
+        After creating a new collection record, this Collection object will be
+        associated with the new record, and subsequent calls to `save()` will
+        update that record.
+
+ :name:
+ The collection name.
+
+        :create_collection_record:
+           If False, only save the manifest to Keep; do not create a
+           collection record.
+
+ :owner_uuid:
+           The user or project uuid that will own this collection.
+ If None, defaults to the current user.
+
+ :ensure_unique_name:
+ If True, ask the API server to rename the collection
+ if it conflicts with a collection with the same name and owner. If
+ False, a name conflict will result in an error.
+
+ """
+ self._my_block_manager().commit_all()
+ self._my_keep().put(self.manifest_text(strip=True), num_retries=num_retries)
+ text = self.manifest_text(strip=False)
+
+ if create_collection_record:
+ if name is None:
+ name = "Collection created %s" % (time.strftime("%Y-%m-%d %H:%M:%S %Z", time.localtime()))
+
+ body = {"manifest_text": text,
+ "name": name}
+ if owner_uuid:
+ body["owner_uuid"] = owner_uuid
+
+ self._api_response = self._my_api().collections().create(ensure_unique_name=ensure_unique_name, body=body).execute(num_retries=num_retries)
+ text = self._api_response["manifest_text"]
+
+ self._manifest_locator = self._api_response["uuid"]
+
+ self._manifest_text = text
+ self.set_unmodified()
+
+ @synchronized
+ def subscribe(self, callback):
+ self.callbacks.append(callback)
+
+ @synchronized
+ def unsubscribe(self, callback):
+ self.callbacks.remove(callback)
+
+ @synchronized
+ def notify(self, event, collection, name, item):
+ for c in self.callbacks:
+ c(event, collection, name, item)
+
+ @synchronized
+ def _import_manifest(self, manifest_text):
+ """Import a manifest into a `Collection`.
+
+ :manifest_text:
+ The manifest text to import from.
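+
+        A manifest is a sequence of text lines, each consisting of a stream
+        name, one or more block locators, and one or more file segments.
+        For example, a single 10-byte file (locator borrowed from the tests
+        in this changeset):
+
+          . 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt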
+
+ """
+ if len(self) > 0:
+            raise errors.ArgumentError("Can only import manifest into an empty collection")
+
+ STREAM_NAME = 0
+ BLOCKS = 1
+ SEGMENTS = 2
+
+ stream_name = None
+ state = STREAM_NAME
+
+ for token_and_separator in re.finditer(r'(\S+)(\s+|$)', manifest_text):
+ tok = token_and_separator.group(1)
+ sep = token_and_separator.group(2)
+
+ if state == STREAM_NAME:
+ # starting a new stream
+ stream_name = tok.replace('\\040', ' ')
+ blocks = []
+ segments = []
+ streamoffset = 0L
+ state = BLOCKS
+ continue
+
+ if state == BLOCKS:
+ block_locator = re.match(r'[0-9a-f]{32}\+(\d+)(\+\S+)*', tok)
+ if block_locator:
+ blocksize = long(block_locator.group(1))
+ blocks.append(Range(tok, streamoffset, blocksize))
+ streamoffset += blocksize
+ else:
+ state = SEGMENTS
+
+ if state == SEGMENTS:
+ file_segment = re.search(r'^(\d+):(\d+):(\S+)', tok)
+ if file_segment:
+ pos = long(file_segment.group(1))
+ size = long(file_segment.group(2))
+ name = file_segment.group(3).replace('\\040', ' ')
+ filepath = os.path.join(stream_name, name)
+ afile = self.find_or_create(filepath, FILE)
+ if isinstance(afile, ArvadosFile):
+ afile.add_segment(blocks, pos, size)
+ else:
+                        raise errors.SyntaxError("File %s conflicts with stream of the same name." % filepath)
+ else:
+ # error!
+ raise errors.SyntaxError("Invalid manifest format")
+
+ if sep == "\n":
+ stream_name = None
+ state = STREAM_NAME
+
+ self.set_unmodified()
+
+
+class Subcollection(SynchronizedCollectionBase):
+ """This is a subdirectory within a collection that doesn't have its own API
+ server record.
+
+ It falls under the umbrella of the root collection.
+
+ """
+
+ def __init__(self, parent):
+ super(Subcollection, self).__init__(parent)
+ self.lock = self.root_collection().lock
+ self._manifest_text = None
+
+ def root_collection(self):
+ return self.parent.root_collection()
+
+ def writable(self):
+ return self.root_collection().writable()
+
+ def _my_api(self):
+ return self.root_collection()._my_api()
+
+ def _my_keep(self):
+ return self.root_collection()._my_keep()
+
+ def _my_block_manager(self):
+ return self.root_collection()._my_block_manager()
+
+ def notify(self, event, collection, name, item):
+ return self.root_collection().notify(event, collection, name, item)
+
+ def stream_name(self):
+ for k, v in self.parent.items():
+ if v is self:
+ return os.path.join(self.parent.stream_name(), k)
+ return '.'
+
+ @synchronized
+ def clone(self, new_parent):
+ c = Subcollection(new_parent)
+ c._clonefrom(self)
+ return c
+
+
+class CollectionReader(Collection):
+ """A read-only collection object.
+
+ Initialize from an api collection record locator, a portable data hash of a
+ manifest, or raw manifest text. See `Collection` constructor for detailed
+ options.
+
+ """
+ def __init__(self, manifest_locator_or_text, *args, **kwargs):
+ self._in_init = True
+ super(CollectionReader, self).__init__(manifest_locator_or_text, *args, **kwargs)
+ self._in_init = False
+
+        # Forgo any locking, since the contents should never change once initialized.
+ self.lock = NoopLock()
+
+        # Backwards compatibility with old CollectionReader
+ # all_streams() and all_files()
+ self._streams = None
+
+ def writable(self):
+ return self._in_init
+
+ def _populate_streams(orig_func):
+ @functools.wraps(orig_func)
+ def populate_streams_wrapper(self, *args, **kwargs):
+ # Defer populating self._streams until needed since it creates a copy of the manifest.
+ if self._streams is None:
+ if self._manifest_text:
+ self._streams = [sline.split()
+ for sline in self._manifest_text.split("\n")
+ if sline]
+ else:
+ self._streams = []
+ return orig_func(self, *args, **kwargs)
+ return populate_streams_wrapper
+
+ @_populate_streams
+ def normalize(self):
+ """Normalize the streams returned by `all_streams`.
+
+        This method is kept for backwards compatibility and only affects the
+        behavior of `all_streams()` and `all_files()`.
+
+ """
+
+ # Rearrange streams
+ streams = {}
+ for s in self.all_streams():
+ for f in s.all_files():
+ streamname, filename = split(s.name() + "/" + f.name())
+ if streamname not in streams:
+ streams[streamname] = {}
+ if filename not in streams[streamname]:
+ streams[streamname][filename] = []
+ for r in f.segments:
+ streams[streamname][filename].extend(s.locators_and_ranges(r.locator, r.range_size))
+
+ self._streams = [normalize_stream(s, streams[s])
+ for s in sorted(streams)]
+
+    @_populate_streams
+ def all_streams(self):
+ return [StreamReader(s, self._my_keep(), num_retries=self.num_retries)
+ for s in self._streams]
+
+ @_populate_streams
+ def all_files(self):
+ for s in self.all_streams():
+ for f in s.all_files():
+ yield f
def flush_data(self):
start_buffer_len = self._data_buffer_len
- start_block_count = self.bytes_written / self.KEEP_BLOCK_SIZE
+ start_block_count = self.bytes_written / arvados.config.KEEP_BLOCK_SIZE
super(ArvPutCollectionWriter, self).flush_data()
if self._data_buffer_len < start_buffer_len: # We actually PUT data.
self.bytes_written += (start_buffer_len - self._data_buffer_len)
self.report_progress()
- if (self.bytes_written / self.KEEP_BLOCK_SIZE) > start_block_count:
+ if (self.bytes_written / arvados.config.KEEP_BLOCK_SIZE) > start_block_count:
self.cache_state()
def _record_new_input(self, input_type, source_name, dest_name):
else:
default_config_file = ''
+KEEP_BLOCK_SIZE = 2**26
EMPTY_BLOCK_LOCATOR = 'd41d8cd98f00b204e9800998ecf8427e+0'
def initialize(config_file=default_config_file):
cfg[var] = val
return cfg
-def flag_is_true(key):
- return get(key, '').lower() in set(['1', 't', 'true', 'y', 'yes'])
+def flag_is_true(key, d=None):
+ if d is None:
+ d = settings()
+ return d.get(key, '').lower() in set(['1', 't', 'true', 'y', 'yes'])
def get(key, default_val=None):
return settings().get(key, default_val)
def __init__(self, message='', service_errors=()):
"""KeepRequestError(message='', service_errors=())
- Arguments:
- * message: A human-readable message describing what Keep operation
+ :message:
+ A human-readable message describing what Keep operation
failed.
- * service_errors: An iterable that yields 2-tuples of Keep
+
+ :service_errors:
+ An iterable that yields 2-tuples of Keep
service URLs to the error encountered when talking to
it--either an exception, or an HTTP response object. These
will be packed into an OrderedDict, available through the
service_errors() method.
+
"""
self._service_errors = OrderedDict(service_errors)
if self._service_errors:
self.permission_hint()] + self.hints
if s is not None)
+ def stripped(self):
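+        """Return the locator stripped to "<md5sum>+<size>" (no hints or signatures)."""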
+ return "%s+%i" % (self.md5sum, self.size)
+
def _make_hex_prop(name, length):
# Build and return a new property with the given name that
# must be a hex string of the given length.
def cap_cache(self):
'''Cap the cache size to self.cache_max'''
- self._cache_lock.acquire()
- try:
+ with self._cache_lock:
# Select all slots except those where ready.is_set() and content is
# None (that means there was an error reading the block).
self._cache = [c for c in self._cache if not (c.ready.is_set() and c.content is None)]
del self._cache[i]
break
sm = sum([slot.size() for slot in self._cache])
- finally:
- self._cache_lock.release()
+
+ def _get(self, locator):
+ # Test if the locator is already in the cache
+ for i in xrange(0, len(self._cache)):
+ if self._cache[i].locator == locator:
+ n = self._cache[i]
+ if i != 0:
+ # move it to the front
+ del self._cache[i]
+ self._cache.insert(0, n)
+ return n
+ return None
+
+ def get(self, locator):
+ with self._cache_lock:
+ return self._get(locator)
def reserve_cache(self, locator):
'''Reserve a cache slot for the specified locator,
or return the existing slot.'''
- self._cache_lock.acquire()
- try:
- # Test if the locator is already in the cache
- for i in xrange(0, len(self._cache)):
- if self._cache[i].locator == locator:
- n = self._cache[i]
- if i != 0:
- # move it to the front
- del self._cache[i]
- self._cache.insert(0, n)
- return n, False
-
- # Add a new cache slot for the locator
- n = KeepBlockCache.CacheSlot(locator)
- self._cache.insert(0, n)
- return n, True
- finally:
- self._cache_lock.release()
+ with self._cache_lock:
+ n = self._get(locator)
+ if n:
+ return n, False
+ else:
+ # Add a new cache slot for the locator
+ n = KeepBlockCache.CacheSlot(locator)
+ self._cache.insert(0, n)
+ return n, True
class KeepClient(object):
HTTP_ERRORS = (requests.exceptions.RequestException,
socket.error, ssl.SSLError)
- def __init__(self, root, **headers):
+ def __init__(self, root, session, **headers):
self.root = root
self.last_result = None
self.success_flag = None
+ self.session = session
self.get_headers = {'Accept': 'application/octet-stream'}
self.get_headers.update(headers)
self.put_headers = headers
_logger.debug("Request: GET %s", url)
try:
with timer.Timer() as t:
- result = requests.get(url.encode('utf-8'),
+ result = self.session.get(url.encode('utf-8'),
headers=self.get_headers,
timeout=timeout)
except self.HTTP_ERRORS as e:
content = result.content
_logger.info("%s response: %s bytes in %s msec (%.3f MiB/sec)",
self.last_status(), len(content), t.msecs,
- (len(content)/(1024.0*1024))/t.secs)
+ (len(content)/(1024.0*1024))/t.secs if t.secs > 0 else 0)
if self.success_flag:
resp_md5 = hashlib.md5(content).hexdigest()
if resp_md5 == locator.md5sum:
url = self.root + hash_s
_logger.debug("Request: PUT %s", url)
try:
- result = requests.put(url.encode('utf-8'),
+ result = self.session.put(url.encode('utf-8'),
data=body,
headers=self.put_headers,
timeout=timeout)
def run_with_limiter(self, limiter):
if self.service.finished():
return
- _logger.debug("KeepWriterThread %s proceeding %s %s",
+ _logger.debug("KeepWriterThread %s proceeding %s+%i %s",
str(threading.current_thread()),
self.args['data_hash'],
+ len(self.args['data']),
self.args['service_root'])
self._success = bool(self.service.put(
self.args['data_hash'],
status = self.service.last_status()
if self._success:
result = self.service.last_result
- _logger.debug("KeepWriterThread %s succeeded %s %s",
+ _logger.debug("KeepWriterThread %s succeeded %s+%i %s",
str(threading.current_thread()),
self.args['data_hash'],
+ len(self.args['data']),
self.args['service_root'])
# Tick the 'done' counter for the number of replica
# reported stored by the server, for the case that
def __init__(self, api_client=None, proxy=None,
timeout=DEFAULT_TIMEOUT, proxy_timeout=DEFAULT_PROXY_TIMEOUT,
api_token=None, local_store=None, block_cache=None,
- num_retries=0):
+ num_retries=0, session=None):
"""Initialize a new KeepClient.
Arguments:
- * api_client: The API client to use to find Keep services. If not
+ :api_client:
+ The API client to use to find Keep services. If not
provided, KeepClient will build one from available Arvados
configuration.
- * proxy: If specified, this KeepClient will send requests to this
- Keep proxy. Otherwise, KeepClient will fall back to the setting
- of the ARVADOS_KEEP_PROXY configuration setting. If you want to
- ensure KeepClient does not use a proxy, pass in an empty string.
- * timeout: The timeout (in seconds) for HTTP requests to Keep
+
+ :proxy:
+ If specified, this KeepClient will send requests to this Keep
+ proxy. Otherwise, KeepClient will fall back to the setting of the
+ ARVADOS_KEEP_PROXY configuration setting. If you want to ensure
+ KeepClient does not use a proxy, pass in an empty string.
+
+ :timeout:
+ The timeout (in seconds) for HTTP requests to Keep
non-proxy servers. A tuple of two floats is interpreted as
(connection_timeout, read_timeout): see
http://docs.python-requests.org/en/latest/user/advanced/#timeouts.
Default: (2, 300).
- * proxy_timeout: The timeout (in seconds) for HTTP requests to
+
+ :proxy_timeout:
+ The timeout (in seconds) for HTTP requests to
Keep proxies. A tuple of two floats is interpreted as
(connection_timeout, read_timeout). Default: (20, 300).
- * api_token: If you're not using an API client, but only talking
+
+ :api_token:
+ If you're not using an API client, but only talking
directly to a Keep proxy, this parameter specifies an API token
to authenticate Keep requests. It is an error to specify both
api_client and api_token. If you specify neither, KeepClient
will use one available from the Arvados configuration.
- * local_store: If specified, this KeepClient will bypass Keep
+
+ :local_store:
+ If specified, this KeepClient will bypass Keep
services, and save data to the named directory. If unspecified,
KeepClient will fall back to the setting of the $KEEP_LOCAL_STORE
environment variable. If you want to ensure KeepClient does not
use local storage, pass in an empty string. This is primarily
intended to mock a server for testing.
- * num_retries: The default number of times to retry failed requests.
+
+ :num_retries:
+ The default number of times to retry failed requests.
This will be used as the default num_retries value when get() and
put() are called. Default 0.
+
+ :session:
+ The requests.Session object to use for get() and put() requests.
+ Will create one if not specified.
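+
+    Example (illustrative; assumes Arvados configuration and a reachable
+    Keep service):
+
+      kc = KeepClient(num_retries=2)
+      loc = kc.put('foo')
+      data = kc.get(loc)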
"""
self.lock = threading.Lock()
if proxy is None:
self.put = self.local_store_put
else:
self.num_retries = num_retries
+ self.session = session if session is not None else requests.Session()
if proxy:
if not proxy.endswith('/'):
proxy += '/'
local_roots = self.weighted_service_roots(md5_s, force_rebuild)
for root in local_roots:
if root not in roots_map:
- roots_map[root] = self.KeepService(root, **headers)
+ roots_map[root] = self.KeepService(root, self.session, **headers)
return local_roots
@staticmethod
else:
return None
+ def get_from_cache(self, loc):
+        """Fetch a block only if it is in the cache, otherwise return None."""
+        slot = self.block_cache.get(loc)
+        if slot is not None and slot.ready.is_set():
+ return slot.get()
+ else:
+ return None
+
@retry.retry_method
def get(self, loc_s, num_retries=None):
"""Get data from Keep.
return ''.join(self.get(x) for x in loc_s.split(','))
locator = KeepLocator(loc_s)
expect_hash = locator.md5sum
-
slot, first = self.block_cache.reserve_cache(expect_hash)
if not first:
v = slot.get()
hint_roots = ['http://keep.{}.arvadosapi.com/'.format(hint[2:])
for hint in locator.hints if hint.startswith('K@')]
# Map root URLs their KeepService objects.
- roots_map = {root: self.KeepService(root) for root in hint_roots}
+ roots_map = {root: self.KeepService(root, self.session) for root in hint_roots}
blob = None
loop = retry.RetryLoop(num_retries, self._check_loop_result,
backoff_start=2)
return ''
with open(os.path.join(self.local_store, locator.md5sum), 'r') as f:
return f.read()
+
+    def is_cached(self, locator):
+        # Use the non-reserving cache lookup so this check has no side effects.
+        return self.block_cache.get(locator) is not None
--- /dev/null
+import threading
+import api
+import keep
+import config
+import copy
+
+class ThreadSafeApiCache(object):
+ """Threadsafe wrapper for API objects.
+
+    This stores and returns a different api object per thread, because
+    httplib2, which underlies apiclient, is not threadsafe.
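+
+    Example (illustrative; `some_uuid` is a placeholder):
+
+      api = ThreadSafeApiCache()
+      # Attribute access is proxied to a thread-local API client.
+      record = api.collections().get(uuid=some_uuid).execute()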
+
+ """
+
+ def __init__(self, apiconfig=None, keep_params={}):
+ if apiconfig is None:
+ apiconfig = config.settings()
+ self.apiconfig = copy.copy(apiconfig)
+ self.local = threading.local()
+ self.keep = keep.KeepClient(api_client=self, **keep_params)
+
+ def localapi(self):
+ if 'api' not in self.local.__dict__:
+ self.local.api = api.api_from_config('v1', apiconfig=self.apiconfig)
+ return self.local.api
+
+ def __getattr__(self, name):
+ # Proxy nonexistent attributes to the thread-local API client.
+ if name == "api_token":
+ return self.apiconfig['ARVADOS_API_TOKEN']
+ return getattr(self.localapi(), name)
-import bz2
import collections
import hashlib
import os
import re
-import zlib
+import threading
+import functools
+import copy
-from .arvfile import ArvadosFileBase
+from ._ranges import locators_and_ranges, Range
+from .arvfile import StreamFileReader
from arvados.retry import retry_method
from keep import *
import config
import errors
-
-LOCATOR = 0
-BLOCKSIZE = 1
-OFFSET = 2
-SEGMENTSIZE = 3
-
-def locators_and_ranges(data_locators, range_start, range_size, debug=False):
- '''
- Get blocks that are covered by the range
- data_locators: list of [locator, block_size, block_start], assumes that blocks are in order and contigous
- range_start: start of range
- range_size: size of range
- returns list of [block locator, blocksize, segment offset, segment size] that satisfies the range
- '''
- if range_size == 0:
- return []
- resp = []
- range_start = long(range_start)
- range_size = long(range_size)
- range_end = range_start + range_size
- block_start = 0L
-
- # range_start/block_start is the inclusive lower bound
- # range_end/block_end is the exclusive upper bound
-
- hi = len(data_locators)
- lo = 0
- i = int((hi + lo) / 2)
- block_size = data_locators[i][BLOCKSIZE]
- block_start = data_locators[i][OFFSET]
- block_end = block_start + block_size
- if debug: print '---'
-
- # perform a binary search for the first block
- # assumes that all of the blocks are contigious, so range_start is guaranteed
- # to either fall into the range of a block or be outside the block range entirely
- while not (range_start >= block_start and range_start < block_end):
- if lo == i:
- # must be out of range, fail
- return []
- if range_start > block_start:
- lo = i
- else:
- hi = i
- i = int((hi + lo) / 2)
- if debug: print lo, i, hi
- block_size = data_locators[i][BLOCKSIZE]
- block_start = data_locators[i][OFFSET]
- block_end = block_start + block_size
-
- while i < len(data_locators):
- locator, block_size, block_start = data_locators[i]
- block_end = block_start + block_size
- if debug:
- print locator, "range_start", range_start, "block_start", block_start, "range_end", range_end, "block_end", block_end
- if range_end <= block_start:
- # range ends before this block starts, so don't look at any more locators
- break
-
- #if range_start >= block_end:
- # range starts after this block ends, so go to next block
- # we should always start at the first block due to the binary above, so this test is redundant
- #next
-
- if range_start >= block_start and range_end <= block_end:
- # range starts and ends in this block
- resp.append([locator, block_size, range_start - block_start, range_size])
- elif range_start >= block_start and range_end > block_end:
- # range starts in this block
- resp.append([locator, block_size, range_start - block_start, block_end - range_start])
- elif range_start < block_start and range_end > block_end:
- # range starts in a previous block and extends to further blocks
- resp.append([locator, block_size, 0L, block_size])
- elif range_start < block_start and range_end <= block_end:
- # range starts in a previous block and ends in this block
- resp.append([locator, block_size, 0L, range_end - block_start])
- block_start = block_end
- i += 1
- return resp
-
-def split(path):
- """split(path) -> streamname, filename
-
- Separate the stream name and file name in a /-separated stream path.
- If no stream name is available, assume '.'.
- """
- try:
- stream_name, file_name = path.rsplit('/', 1)
- except ValueError: # No / in string
- stream_name, file_name = '.', path
- return stream_name, file_name
-
-class StreamFileReader(ArvadosFileBase):
- class _NameAttribute(str):
- # The Python file API provides a plain .name attribute.
- # Older SDK provided a name() method.
- # This class provides both, for maximum compatibility.
- def __call__(self):
- return self
-
-
- def __init__(self, stream, segments, name):
- super(StreamFileReader, self).__init__(self._NameAttribute(name), 'rb')
- self._stream = stream
- self.segments = segments
- self._filepos = 0L
- self.num_retries = stream.num_retries
- self._readline_cache = (None, None)
-
- def __iter__(self):
- while True:
- data = self.readline()
- if not data:
- break
- yield data
-
- def decompressed_name(self):
- return re.sub('\.(bz2|gz)$', '', self.name)
-
- def stream_name(self):
- return self._stream.name()
-
- @ArvadosFileBase._before_close
- def seek(self, pos, whence=os.SEEK_CUR):
- if whence == os.SEEK_CUR:
- pos += self._filepos
- elif whence == os.SEEK_END:
- pos += self.size()
- self._filepos = min(max(pos, 0L), self.size())
-
- def tell(self):
- return self._filepos
-
- def size(self):
- n = self.segments[-1]
- return n[OFFSET] + n[BLOCKSIZE]
-
- @ArvadosFileBase._before_close
- @retry_method
- def read(self, size, num_retries=None):
- """Read up to 'size' bytes from the stream, starting at the current file position"""
- if size == 0:
- return ''
-
- data = ''
- available_chunks = locators_and_ranges(self.segments, self._filepos, size)
- if available_chunks:
- locator, blocksize, segmentoffset, segmentsize = available_chunks[0]
- data = self._stream.readfrom(locator+segmentoffset, segmentsize,
- num_retries=num_retries)
-
- self._filepos += len(data)
- return data
-
- @ArvadosFileBase._before_close
- @retry_method
- def readfrom(self, start, size, num_retries=None):
- """Read up to 'size' bytes from the stream, starting at 'start'"""
- if size == 0:
- return ''
-
- data = []
- for locator, blocksize, segmentoffset, segmentsize in locators_and_ranges(self.segments, start, size):
- data.append(self._stream.readfrom(locator+segmentoffset, segmentsize,
- num_retries=num_retries))
- return ''.join(data)
-
- @ArvadosFileBase._before_close
- @retry_method
- def readall(self, size=2**20, num_retries=None):
- while True:
- data = self.read(size, num_retries=num_retries)
- if data == '':
- break
- yield data
-
- @ArvadosFileBase._before_close
- @retry_method
- def readline(self, size=float('inf'), num_retries=None):
- cache_pos, cache_data = self._readline_cache
- if self.tell() == cache_pos:
- data = [cache_data]
- else:
- data = ['']
- data_size = len(data[-1])
- while (data_size < size) and ('\n' not in data[-1]):
- next_read = self.read(2 ** 20, num_retries=num_retries)
- if not next_read:
- break
- data.append(next_read)
- data_size += len(next_read)
- data = ''.join(data)
- try:
- nextline_index = data.index('\n') + 1
- except ValueError:
- nextline_index = len(data)
- nextline_index = min(nextline_index, size)
- self._readline_cache = (self.tell(), data[nextline_index:])
- return data[:nextline_index]
-
- @ArvadosFileBase._before_close
- @retry_method
- def decompress(self, decompress, size, num_retries=None):
- for segment in self.readall(size, num_retries):
- data = decompress(segment)
- if data:
- yield data
-
- @ArvadosFileBase._before_close
- @retry_method
- def readall_decompressed(self, size=2**20, num_retries=None):
- self.seek(0)
- if self.name.endswith('.bz2'):
- dc = bz2.BZ2Decompressor()
- return self.decompress(dc.decompress, size,
- num_retries=num_retries)
- elif self.name.endswith('.gz'):
- dc = zlib.decompressobj(16+zlib.MAX_WBITS)
- return self.decompress(lambda segment: dc.decompress(dc.unconsumed_tail + segment),
- size, num_retries=num_retries)
- else:
- return self.readall(size, num_retries=num_retries)
-
- @ArvadosFileBase._before_close
- @retry_method
- def readlines(self, sizehint=float('inf'), num_retries=None):
- data = []
- data_size = 0
- for s in self.readall(num_retries=num_retries):
- data.append(s)
- data_size += len(s)
- if data_size >= sizehint:
- break
- return ''.join(data).splitlines(True)
-
- def as_manifest(self):
- manifest_text = ['.']
- manifest_text.extend([d[LOCATOR] for d in self._stream._data_locators])
- manifest_text.extend(["{}:{}:{}".format(seg[LOCATOR], seg[BLOCKSIZE], self.name().replace(' ', '\\040')) for seg in self.segments])
- return arvados.CollectionReader(' '.join(manifest_text) + '\n').manifest_text(normalize=True)
-
+from ._normalize_stream import normalize_stream
class StreamReader(object):
def __init__(self, tokens, keep=None, debug=False, _empty=False,
s = re.match(r'^[0-9a-f]{32}\+(\d+)(\+\S+)*$', tok)
if s:
blocksize = long(s.group(1))
- self._data_locators.append([tok, blocksize, streamoffset])
+ self._data_locators.append(Range(tok, streamoffset, blocksize))
streamoffset += blocksize
continue
size = long(s.group(2))
name = s.group(3).replace('\\040', ' ')
if name not in self._files:
- self._files[name] = StreamFileReader(self, [[pos, size, 0]], name)
+ self._files[name] = StreamFileReader(self, [Range(pos, 0, size)], name)
else:
- n = self._files[name]
- n.segments.append([pos, size, n.size()])
+ filereader = self._files[name]
+ filereader.segments.append(Range(pos, filereader.size(), size))
continue
raise errors.SyntaxError("Invalid manifest format")
def all_files(self):
return self._files.values()
- def size(self):
+ def _size(self):
n = self._data_locators[-1]
- return n[OFFSET] + n[BLOCKSIZE]
+ return n.range_start + n.range_size
+
+ def size(self):
+ return self._size()
def locators_and_ranges(self, range_start, range_size):
return locators_and_ranges(self._data_locators, range_start, range_size)
+ @retry_method
+ def _keepget(self, locator, num_retries=None):
+ return self._keep.get(locator, num_retries=num_retries)
+
@retry_method
def readfrom(self, start, size, num_retries=None):
"""Read up to 'size' bytes from the stream, starting at 'start'"""
if self._keep is None:
self._keep = KeepClient(num_retries=self.num_retries)
data = []
- for locator, blocksize, segmentoffset, segmentsize in locators_and_ranges(self._data_locators, start, size):
- data.append(self._keep.get(locator, num_retries=num_retries)[segmentoffset:segmentoffset+segmentsize])
+ for lr in locators_and_ranges(self._data_locators, start, size):
+ data.append(self._keepget(lr.locator, num_retries=num_retries)[lr.segment_offset:lr.segment_offset+lr.segment_size])
return ''.join(data)
def manifest_text(self, strip=False):
manifest_text = [self.name().replace(' ', '\\040')]
if strip:
for d in self._data_locators:
- m = re.match(r'^[0-9a-f]{32}\+\d+', d[LOCATOR])
+ m = re.match(r'^[0-9a-f]{32}\+\d+', d.locator)
manifest_text.append(m.group(0))
else:
- manifest_text.extend([d[LOCATOR] for d in self._data_locators])
- manifest_text.extend([' '.join(["{}:{}:{}".format(seg[LOCATOR], seg[BLOCKSIZE], f.name().replace(' ', '\\040'))
+ manifest_text.extend([d.locator for d in self._data_locators])
+ manifest_text.extend([' '.join(["{}:{}:{}".format(seg.locator, seg.range_size, f.name.replace(' ', '\\040'))
for seg in f.segments])
for f in self._files.values()])
return ' '.join(manifest_text) + '\n'
r.raw = io.BytesIO(body)
return r
-def mock_get_responses(body, *codes, **headers):
- return mock.patch('requests.get', side_effect=queue_with((
- fake_requests_response(code, body, **headers) for code in codes)))
+# The following functions patch requests.Session(); return_value is a mock
+# Session object.  The desired put/get behavior is configured on that mock's
+# put/get attributes.
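+#
+# Example (illustrative; `keep_client` and `locator` are placeholders):
+#
+#   with mock_get_responses('foo', 200):
+#       data = keep_client.get(locator)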
def mock_put_responses(body, *codes, **headers):
- return mock.patch('requests.put', side_effect=queue_with((
- fake_requests_response(code, body, **headers) for code in codes)))
+ m = mock.MagicMock()
+ if isinstance(body, tuple):
+ codes = list(codes)
+ codes.insert(0, body)
+ m.return_value.put.side_effect = queue_with((fake_requests_response(code, b, **headers) for b, code in codes))
+ else:
+ m.return_value.put.side_effect = queue_with((fake_requests_response(code, body, **headers) for code in codes))
+ return mock.patch('requests.Session', m)
+
+def mock_get_responses(body, *codes, **headers):
+ m = mock.MagicMock()
+ m.return_value.get.side_effect = queue_with((fake_requests_response(code, body, **headers) for code in codes))
+ return mock.patch('requests.Session', m)
-def mock_requestslib_responses(method, body, *codes, **headers):
- return mock.patch(method, side_effect=queue_with((
- fake_requests_response(code, body, **headers) for code in codes)))
+def mock_get(side_effect):
+ m = mock.MagicMock()
+ m.return_value.get.side_effect = side_effect
+ return mock.patch('requests.Session', m)
+
+def mock_put(side_effect):
+ m = mock.MagicMock()
+ m.return_value.put.side_effect = side_effect
+ return mock.patch('requests.Session', m)
class MockStreamReader(object):
def __init__(self, name='.', *data):
def readfrom(self, start, size, num_retries=None):
return self._data[start:start + size]
-
class ApiClientMock(object):
def api_client_mock(self):
return mock.MagicMock(name='api_client_mock')
# Add the Python SDK source to the library path.
sys.path.insert(1, os.path.dirname(MY_DIRNAME))
-import arvados.api
+import arvados
import arvados.config
ARVADOS_DIR = os.path.realpath(os.path.join(MY_DIRNAME, '../../..'))
def test_cache_names_stable(self):
for argset in self.CACHE_ARGSET:
- self.assertEquals(self.cache_path_from_arglist(argset),
+ self.assertEqual(self.cache_path_from_arglist(argset),
self.cache_path_from_arglist(argset),
"cache name changed for {}".format(argset))
"path too exotic: {}".format(path))
def test_cache_names_ignore_argument_order(self):
- self.assertEquals(
+ self.assertEqual(
self.cache_path_from_arglist(['a', 'b', 'c']),
self.cache_path_from_arglist(['c', 'a', 'b']))
- self.assertEquals(
+ self.assertEqual(
self.cache_path_from_arglist(['-', '--filename', 'stdin']),
self.cache_path_from_arglist(['--filename', 'stdin', '-']))
args = arv_put.parse_arguments(['/tmp'])
args.filename = 'tmp'
path2 = arv_put.ResumeCache.make_path(args)
- self.assertEquals(path1, path2,
+ self.assertEqual(path1, path2,
"cache path considered --filename for directory")
- self.assertEquals(
+ self.assertEqual(
self.cache_path_from_arglist(['-']),
self.cache_path_from_arglist(['-', '--max-manifest-depth', '1']),
"cache path considered --max-manifest-depth for file")
def test_cache_names_treat_negative_manifest_depths_identically(self):
base_args = ['/tmp', '--max-manifest-depth']
- self.assertEquals(
+ self.assertEqual(
self.cache_path_from_arglist(base_args + ['-1']),
self.cache_path_from_arglist(base_args + ['-2']))
def test_cache_names_treat_stdin_consistently(self):
- self.assertEquals(
+ self.assertEqual(
self.cache_path_from_arglist(['-', '--filename', 'test']),
self.cache_path_from_arglist(['/dev/stdin', '--filename', 'test']))
def test_cache_names_identical_for_synonymous_names(self):
- self.assertEquals(
+ self.assertEqual(
self.cache_path_from_arglist(['.']),
self.cache_path_from_arglist([os.path.realpath('.')]))
testdir = self.make_tmpdir()
looplink = os.path.join(testdir, 'loop')
os.symlink(testdir, looplink)
- self.assertEquals(
+ self.assertEqual(
self.cache_path_from_arglist([testdir]),
self.cache_path_from_arglist([looplink]))
with tempfile.NamedTemporaryFile() as cachefile:
self.last_cache = arv_put.ResumeCache(cachefile.name)
self.last_cache.save(thing)
- self.assertEquals(thing, self.last_cache.load())
+ self.assertEqual(thing, self.last_cache.load())
def test_empty_cache(self):
with tempfile.NamedTemporaryFile() as cachefile:
cache.save(thing)
cache.close()
self.last_cache = arv_put.ResumeCache(path)
- self.assertEquals(thing, self.last_cache.load())
+ self.assertEqual(thing, self.last_cache.load())
def test_multiple_cache_writes(self):
thing = ['short', 'list']
# sure the cache file gets truncated.
self.last_cache.save(['long', 'long', 'list'])
self.last_cache.save(thing)
- self.assertEquals(thing, self.last_cache.load())
+ self.assertEqual(thing, self.last_cache.load())
def test_cache_is_locked(self):
with tempfile.NamedTemporaryFile() as cachefile:
cwriter.write_file('/dev/null')
cwriter.cache_state()
self.assertTrue(self.cache.load())
- self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
+ self.assertEqual(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
def test_writer_works_without_cache(self):
cwriter = arv_put.ArvPutCollectionWriter()
cwriter.write_file('/dev/null')
- self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
+ self.assertEqual(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
def test_writer_resumes_from_cache(self):
cwriter = arv_put.ArvPutCollectionWriter(self.cache)
cwriter.cache_state()
new_writer = arv_put.ArvPutCollectionWriter.from_cache(
self.cache)
- self.assertEquals(
+ self.assertEqual(
". 098f6bcd4621d373cade4e832627b4f6+4 0:4:test\n",
new_writer.manifest_text())
cwriter.write_file(testfile.name, 'test')
new_writer = arv_put.ArvPutCollectionWriter.from_cache(self.cache)
new_writer.write_file('/dev/null')
- self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", new_writer.manifest_text())
+ self.assertEqual(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", new_writer.manifest_text())
def test_new_writer_from_empty_cache(self):
cwriter = arv_put.ArvPutCollectionWriter.from_cache(self.cache)
cwriter.write_file('/dev/null')
- self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
+ self.assertEqual(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
def test_writer_resumable_after_arbitrary_bytes(self):
cwriter = arv_put.ArvPutCollectionWriter(self.cache)
cwriter.cache_state()
new_writer = arv_put.ArvPutCollectionWriter.from_cache(
self.cache)
- self.assertEquals(cwriter.manifest_text(), new_writer.manifest_text())
+ self.assertEqual(cwriter.manifest_text(), new_writer.manifest_text())
def make_progress_tester(self):
progression = []
TEST_SIZE = os.path.getsize(__file__)
def test_expected_bytes_for_file(self):
- self.assertEquals(self.TEST_SIZE,
+ self.assertEqual(self.TEST_SIZE,
arv_put.expected_bytes_for([__file__]))
def test_expected_bytes_for_tree(self):
tree = self.make_tmpdir()
shutil.copyfile(__file__, os.path.join(tree, 'one'))
shutil.copyfile(__file__, os.path.join(tree, 'two'))
- self.assertEquals(self.TEST_SIZE * 2,
+ self.assertEqual(self.TEST_SIZE * 2,
arv_put.expected_bytes_for([tree]))
- self.assertEquals(self.TEST_SIZE * 3,
+ self.assertEqual(self.TEST_SIZE * 3,
arv_put.expected_bytes_for([tree, __file__]))
def test_expected_bytes_for_device(self):
--- /dev/null
+#!/usr/bin/env python
+
+import bz2
+import gzip
+import io
+import mock
+import os
+import unittest
+import hashlib
+
+import arvados
+from arvados._ranges import Range
+from arvados.keep import KeepLocator
+from arvados.collection import Collection, CollectionReader
+from arvados.arvfile import ArvadosFile, ArvadosFileReader
+
+import arvados_testutil as tutil
+from test_stream import StreamFileReaderTestCase, StreamRetryTestMixin
+
+class ArvadosFileWriterTestCase(unittest.TestCase):
+ class MockKeep(object):
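+        # Minimal in-memory stand-in for a Keep client: serves get() from a
+        # dict of blocks and records every locator requested.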
+ def __init__(self, blocks):
+ self.blocks = blocks
+ self.requests = []
+ def get(self, locator, num_retries=0):
+ self.requests.append(locator)
+ return self.blocks.get(locator)
+ def get_from_cache(self, locator):
+ self.requests.append(locator)
+ return self.blocks.get(locator)
+ def put(self, data, num_retries=None):
+ pdh = "%s+%i" % (hashlib.md5(data).hexdigest(), len(data))
+ self.blocks[pdh] = str(data)
+ return pdh
+
+ class MockApi(object):
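+        # Minimal stand-in for the Arvados API client: create() verifies the
+        # request body against an expected value and returns a canned response.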
+ def __init__(self, b, r):
+ self.body = b
+ self.response = r
+ class MockCollections(object):
+ def __init__(self, b, r):
+ self.body = b
+ self.response = r
+ class Execute(object):
+ def __init__(self, r):
+ self.response = r
+ def execute(self, num_retries=None):
+ return self.response
+ def create(self, ensure_unique_name=False, body=None):
+ if body != self.body:
+ raise Exception("Body %s does not match expectation %s" % (body, self.body))
+ return ArvadosFileWriterTestCase.MockApi.MockCollections.Execute(self.response)
+ def update(self, uuid=None, body=None):
+ return ArvadosFileWriterTestCase.MockApi.MockCollections.Execute(self.response)
+ def collections(self):
+ return ArvadosFileWriterTestCase.MockApi.MockCollections(self.body, self.response)
+
+
+ def test_truncate(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+ api = ArvadosFileWriterTestCase.MockApi({"name":"test_truncate",
+ "manifest_text":". 781e5e245d69b566979b86e28d23f2c7+10 0:8:count.txt\n"},
+ {"uuid":"zzzzz-4zz18-mockcollection0",
+ "manifest_text":". 781e5e245d69b566979b86e28d23f2c7+10 0:8:count.txt\n"})
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+ api_client=api, keep_client=keep) as c:
+ writer = c.open("count.txt", "r+")
+ self.assertEqual(writer.size(), 10)
+ self.assertEqual("0123456789", writer.read(12))
+
+ writer.truncate(8)
+
+ # Make sure reading off the end doesn't break
+ self.assertEqual("", writer.read(12))
+
+ self.assertEqual(writer.size(), 8)
+ writer.seek(0, os.SEEK_SET)
+ self.assertEqual("01234567", writer.read(12))
+
+ self.assertEqual(None, c._manifest_locator)
+ self.assertEqual(True, c.modified())
+ c.save_new("test_truncate")
+ self.assertEqual("zzzzz-4zz18-mockcollection0", c._manifest_locator)
+ self.assertEqual(False, c.modified())
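+
+    # An aside on the manifest strings asserted throughout this class: each
+    # stream line reads "<stream name> <block locators...> <pos:size:name
+    # segments...>", with each segment addressing bytes of the stream's
+    # concatenated blocks. In the fixture above,
+    # ". 781e5e245d69b566979b86e28d23f2c7+10 0:8:count.txt" means stream "."
+    # holds one 10-byte block and count.txt is its first 8 bytes, which is
+    # how truncate(8) drops data without rewriting any block.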
+
+ def test_write_to_end(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+ api = ArvadosFileWriterTestCase.MockApi({"name":"test_append",
+ "manifest_text": ". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:13:count.txt\n"},
+ {"uuid":"zzzzz-4zz18-mockcollection0",
+ "manifest_text": ". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:13:count.txt\n"})
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+ api_client=api, keep_client=keep) as c:
+ writer = c.open("count.txt", "r+")
+ self.assertEqual(writer.size(), 10)
+
+ writer.seek(5, os.SEEK_SET)
+ self.assertEqual("56789", writer.read(8))
+
+ writer.seek(10, os.SEEK_SET)
+ writer.write("foo")
+ self.assertEqual(writer.size(), 13)
+
+ writer.seek(5, os.SEEK_SET)
+ self.assertEqual("56789foo", writer.read(8))
+
+ self.assertEqual(None, c._manifest_locator)
+ self.assertEqual(True, c.modified())
+ self.assertEqual(None, keep.get("acbd18db4cc2f85cedef654fccc4a4d8+3"))
+
+ c.save_new("test_append")
+ self.assertEqual("zzzzz-4zz18-mockcollection0", c._manifest_locator)
+ self.assertEqual(False, c.modified())
+ self.assertEqual("foo", keep.get("acbd18db4cc2f85cedef654fccc4a4d8+3"))
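+
+        # Note the keep.get() pair above: the new "foo" block stays in an
+        # in-memory buffer (absent from Keep, hence None) until save_new()
+        # commits it, after which the same locator resolves to the data.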
+
+
+ def test_append(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+ c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n', keep_client=keep)
+ writer = c.open("count.txt", "a+")
+ self.assertEqual(writer.read(20), "0123456789")
+ writer.seek(0, os.SEEK_SET)
+
+ writer.write("hello")
+ self.assertEqual(writer.read(20), "0123456789hello")
+ writer.seek(0, os.SEEK_SET)
+
+ writer.write("world")
+ self.assertEqual(writer.read(20), "0123456789helloworld")
+
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 fc5e038d38a57032085441e7fe7010b0+10 0:20:count.txt\n", c.manifest_text())
+
+ def test_write_at_beginning(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+ keep_client=keep) as c:
+ writer = c.open("count.txt", "r+")
+ self.assertEqual("0123456789", writer.readfrom(0, 13))
+ writer.seek(0, os.SEEK_SET)
+ writer.write("foo")
+ self.assertEqual(writer.size(), 10)
+ self.assertEqual("foo3456789", writer.readfrom(0, 13))
+ self.assertEqual(". acbd18db4cc2f85cedef654fccc4a4d8+3 781e5e245d69b566979b86e28d23f2c7+10 0:3:count.txt 6:7:count.txt\n", c.manifest_text())
+
+ def test_write_in_middle(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+ keep_client=keep) as c:
+ writer = c.open("count.txt", "r+")
+ self.assertEqual("0123456789", writer.readfrom(0, 13))
+ writer.seek(3, os.SEEK_SET)
+ writer.write("foo")
+ self.assertEqual(writer.size(), 10)
+ self.assertEqual("012foo6789", writer.readfrom(0, 13))
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:count.txt 10:3:count.txt 6:4:count.txt\n", c.manifest_text())
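+
+        # Worked arithmetic for the manifest above: the write lands in a new
+        # 3-byte block ("foo", acbd18db4cc2f85cedef654fccc4a4d8+3) appended
+        # after the original 10-byte block, so count.txt is restitched as
+        # 0:3 (old "012"), 10:3 ("foo" at concatenated offset 10), and 6:4
+        # (old "6789"): only segment pointers change, never block data.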
+
+ def test_write_at_end(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+ keep_client=keep) as c:
+ writer = c.open("count.txt", "r+")
+ self.assertEqual("0123456789", writer.readfrom(0, 13))
+ writer.seek(7, os.SEEK_SET)
+ writer.write("foo")
+ self.assertEqual(writer.size(), 10)
+ self.assertEqual("0123456foo", writer.readfrom(0, 13))
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:7:count.txt 10:3:count.txt\n", c.manifest_text())
+
+ def test_write_across_segment_boundary(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt 0:10:count.txt\n',
+ keep_client=keep) as c:
+ writer = c.open("count.txt", "r+")
+ self.assertEqual("012345678901234", writer.readfrom(0, 15))
+ writer.seek(7, os.SEEK_SET)
+ writer.write("foobar")
+ self.assertEqual(writer.size(), 20)
+ self.assertEqual("0123456foobar34", writer.readfrom(0, 15))
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 3858f62230ac3c915f300c664312c63f+6 0:7:count.txt 10:6:count.txt 3:7:count.txt\n", c.manifest_text())
+
+ def test_write_across_several_segments(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:4:count.txt 0:4:count.txt 0:4:count.txt',
+ keep_client=keep) as c:
+ writer = c.open("count.txt", "r+")
+ self.assertEqual("012301230123", writer.readfrom(0, 15))
+ writer.seek(2, os.SEEK_SET)
+ writer.write("abcdefg")
+ self.assertEqual(writer.size(), 12)
+ self.assertEqual("01abcdefg123", writer.readfrom(0, 15))
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 7ac66c0f148de9519b8bd264312c4d64+7 0:2:count.txt 10:7:count.txt 1:3:count.txt\n", c.manifest_text())
+
+ def test_write_large(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({})
+ api = ArvadosFileWriterTestCase.MockApi({"name":"test_write_large",
+ "manifest_text": ". a5de24f4417cfba9d5825eadc2f4ca49+67108000 598cc1a4ccaef8ab6e4724d87e675d78+32892000 0:100000000:count.txt\n"},
+ {"uuid":"zzzzz-4zz18-mockcollection0",
+ "manifest_text": ". a5de24f4417cfba9d5825eadc2f4ca49+67108000 598cc1a4ccaef8ab6e4724d87e675d78+32892000 0:100000000:count.txt\n"})
+ with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',
+ api_client=api, keep_client=keep) as c:
+ writer = c.open("count.txt", "r+")
+ text = ''.join(["0123456789" for a in xrange(0, 100)])
+ for b in xrange(0, 100000):
+ writer.write(text)
+ self.assertEqual(writer.size(), 100000000)
+
+ self.assertEqual(None, c._manifest_locator)
+ self.assertEqual(True, c.modified())
+ c.save_new("test_write_large")
+ self.assertEqual("zzzzz-4zz18-mockcollection0", c._manifest_locator)
+ self.assertEqual(False, c.modified())
+
+ def test_rewrite_on_empty_file(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({})
+ with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',
+ keep_client=keep) as c:
+ writer = c.open("count.txt", "r+")
+ for b in xrange(0, 10):
+ writer.seek(0, os.SEEK_SET)
+ writer.write("0123456789")
+ writer.arvadosfile._repack_writes()
+ self.assertEqual(writer.size(), 10)
+ self.assertEqual("0123456789", writer.readfrom(0, 20))
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n", c.manifest_text())
+
+ def test_rewrite_append_existing_file(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt',
+ keep_client=keep) as c:
+ writer = c.open("count.txt", "r+")
+ for b in xrange(0, 10):
+ writer.seek(10, os.SEEK_SET)
+ writer.write("abcdefghij")
+ writer.arvadosfile._repack_writes()
+ self.assertEqual(writer.size(), 20)
+ self.assertEqual("0123456789abcdefghij", writer.readfrom(0, 20))
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 a925576942e94b2ef57a066101b48876+10 0:20:count.txt\n", c.manifest_text())
+
+ def test_rewrite_over_existing_file(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt',
+ keep_client=keep) as c:
+ writer = c.open("count.txt", "r+")
+ for b in xrange(0, 10):
+ writer.seek(5, os.SEEK_SET)
+ writer.write("abcdefghij")
+ writer.arvadosfile._repack_writes()
+ self.assertEqual(writer.size(), 15)
+ self.assertEqual("01234abcdefghij", writer.readfrom(0, 20))
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 a925576942e94b2ef57a066101b48876+10 0:5:count.txt 10:10:count.txt\n", c.manifest_text())
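+
+        # In the three _repack_writes() tests above, the repack appears to
+        # be what coalesces ten overlapping writes into one small block, so
+        # the final manifests reference a single new block instead of
+        # accumulating every intermediate buffered write.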
+
+ def test_write_large_rewrite(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({})
+ api = ArvadosFileWriterTestCase.MockApi({"name":"test_write_large",
+ "manifest_text": ". 37400a68af9abdd76ca5bf13e819e42a+32892003 a5de24f4417cfba9d5825eadc2f4ca49+67108000 32892000:3:count.txt 32892006:67107997:count.txt 0:32892000:count.txt\n"},
+ {"uuid":"zzzzz-4zz18-mockcollection0",
+ "manifest_text": ". 37400a68af9abdd76ca5bf13e819e42a+32892003 a5de24f4417cfba9d5825eadc2f4ca49+67108000 32892000:3:count.txt 32892006:67107997:count.txt 0:32892000:count.txt\n"})
+ with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',
+ api_client=api, keep_client=keep) as c:
+ writer = c.open("count.txt", "r+")
+ text = ''.join(["0123456789" for a in xrange(0, 100)])
+ for b in xrange(0, 100000):
+ writer.write(text)
+ writer.seek(0, os.SEEK_SET)
+ writer.write("foo")
+ self.assertEqual(writer.size(), 100000000)
+
+ self.assertEqual(None, c._manifest_locator)
+ self.assertEqual(True, c.modified())
+ c.save_new("test_write_large")
+ self.assertEqual("zzzzz-4zz18-mockcollection0", c._manifest_locator)
+ self.assertEqual(False, c.modified())
+
+ def test_create(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({})
+ api = ArvadosFileWriterTestCase.MockApi({"name":"test_create",
+ "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n"},
+ {"uuid":"zzzzz-4zz18-mockcollection0",
+ "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n"})
+ with Collection(api_client=api, keep_client=keep) as c:
+ writer = c.open("count.txt", "w+")
+ self.assertEqual(writer.size(), 0)
+ writer.write("01234567")
+ self.assertEqual(writer.size(), 8)
+
+ self.assertEqual(None, c._manifest_locator)
+ self.assertEqual(True, c.modified())
+ self.assertEqual(None, keep.get("2e9ec317e197819358fbc43afca7d837+8"))
+ c.save_new("test_create")
+ self.assertEqual("zzzzz-4zz18-mockcollection0", c._manifest_locator)
+ self.assertEqual(False, c.modified())
+ self.assertEqual("01234567", keep.get("2e9ec317e197819358fbc43afca7d837+8"))
+
+
+ def test_create_subdir(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({})
+ api = ArvadosFileWriterTestCase.MockApi({"name":"test_create",
+ "manifest_text":"./foo/bar 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n"},
+ {"uuid":"zzzzz-4zz18-mockcollection0",
+ "manifest_text":"./foo/bar 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n"})
+ with Collection(api_client=api, keep_client=keep) as c:
+ self.assertIsNone(c.api_response())
+ writer = c.open("foo/bar/count.txt", "w+")
+ writer.write("01234567")
+ c.save_new("test_create")
+ self.assertEqual(c.api_response(), api.response)
+
+ def test_overwrite(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+ api = ArvadosFileWriterTestCase.MockApi({"name":"test_overwrite",
+ "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n"},
+ {"uuid":"zzzzz-4zz18-mockcollection0",
+ "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n"})
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
+ api_client=api, keep_client=keep) as c:
+ writer = c.open("count.txt", "w+")
+ self.assertEqual(writer.size(), 0)
+ writer.write("01234567")
+ self.assertEqual(writer.size(), 8)
+
+ self.assertEqual(None, c._manifest_locator)
+ self.assertEqual(True, c.modified())
+ c.save_new("test_overwrite")
+ self.assertEqual("zzzzz-4zz18-mockcollection0", c._manifest_locator)
+ self.assertEqual(False, c.modified())
+
+ def test_file_not_found(self):
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n') as c:
+ with self.assertRaises(IOError):
+ writer = c.open("nocount.txt", "r")
+
+ def test_cannot_open_directory(self):
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n') as c:
+ with self.assertRaises(IOError):
+ writer = c.open(".", "r")
+
+ def test_create_multiple(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({})
+ api = ArvadosFileWriterTestCase.MockApi({"name":"test_create_multiple",
+ "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:8:count1.txt 8:8:count2.txt\n"},
+ {"uuid":"zzzzz-4zz18-mockcollection0",
+ "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:8:count1.txt 8:8:count2.txt\n"})
+ with Collection(api_client=api, keep_client=keep) as c:
+ w1 = c.open("count1.txt", "w")
+ w2 = c.open("count2.txt", "w")
+ w1.write("01234567")
+ w2.write("abcdefgh")
+ self.assertEqual(w1.size(), 8)
+ self.assertEqual(w2.size(), 8)
+
+ self.assertEqual(None, c._manifest_locator)
+ self.assertEqual(True, c.modified())
+ self.assertEqual(None, keep.get("2e9ec317e197819358fbc43afca7d837+8"))
+ c.save_new("test_create_multiple")
+ self.assertEqual("zzzzz-4zz18-mockcollection0", c._manifest_locator)
+ self.assertEqual(False, c.modified())
+ self.assertEqual("01234567", keep.get("2e9ec317e197819358fbc43afca7d837+8"))
+
+
+class ArvadosFileReaderTestCase(StreamFileReaderTestCase):
+ class MockParent(object):
+ class MockBlockMgr(object):
+ def __init__(self, blocks, nocache):
+ self.blocks = blocks
+ self.nocache = nocache
+
+ def block_prefetch(self, loc):
+ pass
+
+ def get_block_contents(self, loc, num_retries=0, cache_only=False):
+ if self.nocache and cache_only:
+ return None
+ return self.blocks[loc]
+
+ def __init__(self, blocks, nocache):
+ self.blocks = blocks
+ self.nocache = nocache
+ self.lock = arvados.arvfile.NoopLock()
+
+ def root_collection(self):
+ return self
+
+ def _my_block_manager(self):
+ return ArvadosFileReaderTestCase.MockParent.MockBlockMgr(self.blocks, self.nocache)
+
+ def sync_mode(self):
+ return SYNC_READONLY
+
+
+ def make_count_reader(self, nocache=False):
+ stream = []
+ n = 0
+ blocks = {}
+ for d in ['01234', '34567', '67890']:
+ loc = '{}+{}'.format(hashlib.md5(d).hexdigest(), len(d))
+ blocks[loc] = d
+ stream.append(Range(loc, n, len(d)))
+ n += len(d)
+ af = ArvadosFile(ArvadosFileReaderTestCase.MockParent(blocks, nocache), stream=stream, segments=[Range(1, 0, 3), Range(6, 3, 3), Range(11, 6, 3)])
+ return ArvadosFileReader(af, "count.txt")
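+
+    # For reference, make_count_reader's stream is '01234' + '34567' +
+    # '67890' (15 bytes), and the segments Range(1, 0, 3), Range(6, 3, 3),
+    # Range(11, 6, 3) map stream bytes 1-3, 6-8 and 11-13 to file offsets
+    # 0, 3 and 6, so the file reads back as "123456789" in the tests below.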
+
+ def test_read_returns_first_block(self):
+        # read() calls will be aligned on block boundaries; see #3663.
+ sfile = self.make_count_reader(nocache=True)
+ self.assertEqual('123', sfile.read(10))
+
+ def test_successive_reads(self):
+ sfile = self.make_count_reader(nocache=True)
+ for expect in ['123', '456', '789', '']:
+ self.assertEqual(expect, sfile.read(10))
+
+ def test_tell_after_block_read(self):
+ sfile = self.make_count_reader(nocache=True)
+ sfile.read(5)
+ self.assertEqual(3, sfile.tell())
+
+ def test_prefetch(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({"2e9ec317e197819358fbc43afca7d837+8": "01234567", "e8dc4081b13434b45189a720b77b6818+8": "abcdefgh"})
+ with Collection(". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:16:count.txt\n", keep_client=keep) as c:
+ r = c.open("count.txt", "r")
+ self.assertEqual("0123", r.read(4))
+ self.assertIn("2e9ec317e197819358fbc43afca7d837+8", keep.requests)
+ self.assertIn("e8dc4081b13434b45189a720b77b6818+8", keep.requests)
+
+ def test__eq__from_manifest(self):
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c1:
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c2:
+ self.assertTrue(c1["count1.txt"] == c2["count1.txt"])
+ self.assertFalse(c1["count1.txt"] != c2["count1.txt"])
+
+ def test__eq__from_writes(self):
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c1:
+ with Collection() as c2:
+ with c2.open("count1.txt", "w") as f:
+ f.write("0123456789")
+
+ self.assertTrue(c1["count1.txt"] == c2["count1.txt"])
+ self.assertFalse(c1["count1.txt"] != c2["count1.txt"])
+
+ def test__ne__(self):
+ with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c1:
+ with Collection() as c2:
+ with c2.open("count1.txt", "w") as f:
+ f.write("1234567890")
+
+ self.assertTrue(c1["count1.txt"] != c2["count1.txt"])
+ self.assertFalse(c1["count1.txt"] == c2["count1.txt"])
+
+
+class ArvadosFileReadTestCase(unittest.TestCase, StreamRetryTestMixin):
+ def reader_for(self, coll_name, **kwargs):
+ stream = []
+ segments = []
+ n = 0
+ for d in self.manifest_for(coll_name).split():
+ try:
+ k = KeepLocator(d)
+ segments.append(Range(n, n, k.size))
+ stream.append(Range(d, n, k.size))
+ n += k.size
+ except ValueError:
+ pass
+ col = Collection(keep_client=self.keep_client())
+ col._my_block_manager().prefetch_enabled = False
+ af = ArvadosFile(col,
+ stream=stream,
+ segments=segments)
+ return ArvadosFileReader(af, "test", **kwargs)
+
+ def read_for_test(self, reader, byte_count, **kwargs):
+ return reader.read(byte_count, **kwargs)
+
+
+class ArvadosFileReadFromTestCase(ArvadosFileReadTestCase):
+ def read_for_test(self, reader, byte_count, **kwargs):
+ return reader.readfrom(0, byte_count, **kwargs)
+
+
+class ArvadosFileReadAllTestCase(ArvadosFileReadTestCase):
+ def read_for_test(self, reader, byte_count, **kwargs):
+ return ''.join(reader.readall(**kwargs))
+
+
+class ArvadosFileReadAllDecompressedTestCase(ArvadosFileReadTestCase):
+ def read_for_test(self, reader, byte_count, **kwargs):
+ return ''.join(reader.readall_decompressed(**kwargs))
+
+
+class ArvadosFileReadlinesTestCase(ArvadosFileReadTestCase):
+ def read_for_test(self, reader, byte_count, **kwargs):
+ return ''.join(reader.readlines(**kwargs))
+
+class BlockManagerTest(unittest.TestCase):
+ def test_bufferblock_append(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({})
+ blockmanager = arvados.arvfile._BlockManager(keep)
+ bufferblock = blockmanager.alloc_bufferblock()
+ bufferblock.append("foo")
+
+ self.assertEqual(bufferblock.size(), 3)
+ self.assertEqual(bufferblock.buffer_view[0:3], "foo")
+ self.assertEqual(bufferblock.locator(), "acbd18db4cc2f85cedef654fccc4a4d8+3")
+
+ bufferblock.append("bar")
+
+ self.assertEqual(bufferblock.size(), 6)
+ self.assertEqual(bufferblock.buffer_view[0:6], "foobar")
+ self.assertEqual(bufferblock.locator(), "3858f62230ac3c915f300c664312c63f+6")
+
+ bufferblock.set_state(arvados.arvfile._BufferBlock.PENDING)
+ with self.assertRaises(arvados.errors.AssertionError):
+ bufferblock.append("bar")
+
+ def test_bufferblock_dup(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({})
+ blockmanager = arvados.arvfile._BlockManager(keep)
+ bufferblock = blockmanager.alloc_bufferblock()
+ bufferblock.append("foo")
+
+ self.assertEqual(bufferblock.size(), 3)
+ self.assertEqual(bufferblock.buffer_view[0:3], "foo")
+ self.assertEqual(bufferblock.locator(), "acbd18db4cc2f85cedef654fccc4a4d8+3")
+ bufferblock.set_state(arvados.arvfile._BufferBlock.PENDING)
+
+ bufferblock2 = blockmanager.dup_block(bufferblock, None)
+ self.assertNotEqual(bufferblock.blockid, bufferblock2.blockid)
+
+ bufferblock2.append("bar")
+
+ self.assertEqual(bufferblock2.size(), 6)
+ self.assertEqual(bufferblock2.buffer_view[0:6], "foobar")
+ self.assertEqual(bufferblock2.locator(), "3858f62230ac3c915f300c664312c63f+6")
+
+ self.assertEqual(bufferblock.size(), 3)
+ self.assertEqual(bufferblock.buffer_view[0:3], "foo")
+ self.assertEqual(bufferblock.locator(), "acbd18db4cc2f85cedef654fccc4a4d8+3")
+
+ def test_bufferblock_get(self):
+ keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
+ blockmanager = arvados.arvfile._BlockManager(keep)
+ bufferblock = blockmanager.alloc_bufferblock()
+ bufferblock.append("foo")
+
+ self.assertEqual(blockmanager.get_block_contents("781e5e245d69b566979b86e28d23f2c7+10", 1), "0123456789")
+ self.assertEqual(blockmanager.get_block_contents(bufferblock.blockid, 1), "foo")
+
+ def test_bufferblock_commit(self):
+ mockkeep = mock.MagicMock()
+ blockmanager = arvados.arvfile._BlockManager(mockkeep)
+ bufferblock = blockmanager.alloc_bufferblock()
+ bufferblock.append("foo")
+ blockmanager.commit_all()
+ self.assertTrue(mockkeep.put.called)
+ self.assertEqual(bufferblock.state(), arvados.arvfile._BufferBlock.COMMITTED)
+ self.assertIsNone(bufferblock.buffer_view)
+
+
+ def test_bufferblock_commit_with_error(self):
+ mockkeep = mock.MagicMock()
+ mockkeep.put.side_effect = arvados.errors.KeepWriteError("fail")
+ blockmanager = arvados.arvfile._BlockManager(mockkeep)
+ bufferblock = blockmanager.alloc_bufferblock()
+ bufferblock.append("foo")
+ with self.assertRaises(arvados.errors.KeepWriteError) as err:
+ blockmanager.commit_all()
+ self.assertEqual(str(err.exception), "Error writing some blocks: acbd18db4cc2f85cedef654fccc4a4d8+3 raised KeepWriteError (fail)")
+ self.assertEqual(bufferblock.state(), arvados.arvfile._BufferBlock.PENDING)
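+
+    # Taken together, the assertions above suggest the _BufferBlock
+    # lifecycle these tests rely on: a freshly allocated block accepts
+    # append(), PENDING marks it queued for a Keep put (further appends
+    # raise), and COMMITTED means the put succeeded and buffer_view is
+    # released. This is inferred from the tests here, not a full state
+    # chart.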
import unittest
import run_test_server
+from arvados._ranges import Range, LocatorAndRange
+from arvados.collection import Collection, CollectionReader
import arvados_testutil as tutil
class TestResumableWriter(arvados.ResumableCollectionWriter):
self.assertEqual(arvados.CollectionReader(m8, self.api_client).manifest_text(normalize=True), m8)
def test_locators_and_ranges(self):
- blocks2 = [['a', 10, 0],
- ['b', 10, 10],
- ['c', 10, 20],
- ['d', 10, 30],
- ['e', 10, 40],
- ['f', 10, 50]]
-
- self.assertEqual(arvados.locators_and_ranges(blocks2, 2, 2), [['a', 10, 2, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 12, 2), [['b', 10, 2, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 22, 2), [['c', 10, 2, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 32, 2), [['d', 10, 2, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 42, 2), [['e', 10, 2, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 52, 2), [['f', 10, 2, 2]])
+ blocks2 = [Range('a', 0, 10),
+ Range('b', 10, 10),
+ Range('c', 20, 10),
+ Range('d', 30, 10),
+ Range('e', 40, 10),
+ Range('f', 50, 10)]
+
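+        # Note the field reordering in this migration: the old triples were
+        # (locator, block_size, stream_offset), while Range appears to take
+        # (locator, range_start, range_size) and LocatorAndRange
+        # (locator, block_size, segment_offset, segment_size), matching the
+        # old four-element result lists positionally.
+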
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 2, 2), [LocatorAndRange('a', 10, 2, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 12, 2), [LocatorAndRange('b', 10, 2, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 22, 2), [LocatorAndRange('c', 10, 2, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 32, 2), [LocatorAndRange('d', 10, 2, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 42, 2), [LocatorAndRange('e', 10, 2, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 52, 2), [LocatorAndRange('f', 10, 2, 2)])
self.assertEqual(arvados.locators_and_ranges(blocks2, 62, 2), [])
self.assertEqual(arvados.locators_and_ranges(blocks2, -2, 2), [])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 0, 2), [['a', 10, 0, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 10, 2), [['b', 10, 0, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 20, 2), [['c', 10, 0, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 30, 2), [['d', 10, 0, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 40, 2), [['e', 10, 0, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 50, 2), [['f', 10, 0, 2]])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 0, 2), [LocatorAndRange('a', 10, 0, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 10, 2), [LocatorAndRange('b', 10, 0, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 20, 2), [LocatorAndRange('c', 10, 0, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 30, 2), [LocatorAndRange('d', 10, 0, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 40, 2), [LocatorAndRange('e', 10, 0, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 50, 2), [LocatorAndRange('f', 10, 0, 2)])
self.assertEqual(arvados.locators_and_ranges(blocks2, 60, 2), [])
self.assertEqual(arvados.locators_and_ranges(blocks2, -2, 2), [])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 9, 2), [['a', 10, 9, 1], ['b', 10, 0, 1]])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 19, 2), [['b', 10, 9, 1], ['c', 10, 0, 1]])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 29, 2), [['c', 10, 9, 1], ['d', 10, 0, 1]])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 39, 2), [['d', 10, 9, 1], ['e', 10, 0, 1]])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 49, 2), [['e', 10, 9, 1], ['f', 10, 0, 1]])
- self.assertEqual(arvados.locators_and_ranges(blocks2, 59, 2), [['f', 10, 9, 1]])
-
-
- blocks3 = [['a', 10, 0],
- ['b', 10, 10],
- ['c', 10, 20],
- ['d', 10, 30],
- ['e', 10, 40],
- ['f', 10, 50],
- ['g', 10, 60]]
-
- self.assertEqual(arvados.locators_and_ranges(blocks3, 2, 2), [['a', 10, 2, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks3, 12, 2), [['b', 10, 2, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks3, 22, 2), [['c', 10, 2, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks3, 32, 2), [['d', 10, 2, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks3, 42, 2), [['e', 10, 2, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks3, 52, 2), [['f', 10, 2, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks3, 62, 2), [['g', 10, 2, 2]])
-
-
- blocks = [['a', 10, 0],
- ['b', 15, 10],
- ['c', 5, 25]]
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 9, 2), [LocatorAndRange('a', 10, 9, 1), LocatorAndRange('b', 10, 0, 1)])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 19, 2), [LocatorAndRange('b', 10, 9, 1), LocatorAndRange('c', 10, 0, 1)])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 29, 2), [LocatorAndRange('c', 10, 9, 1), LocatorAndRange('d', 10, 0, 1)])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 39, 2), [LocatorAndRange('d', 10, 9, 1), LocatorAndRange('e', 10, 0, 1)])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 49, 2), [LocatorAndRange('e', 10, 9, 1), LocatorAndRange('f', 10, 0, 1)])
+ self.assertEqual(arvados.locators_and_ranges(blocks2, 59, 2), [LocatorAndRange('f', 10, 9, 1)])
+
+
+ blocks3 = [Range('a', 0, 10),
+ Range('b', 10, 10),
+ Range('c', 20, 10),
+ Range('d', 30, 10),
+ Range('e', 40, 10),
+ Range('f', 50, 10),
+ Range('g', 60, 10)]
+
+ self.assertEqual(arvados.locators_and_ranges(blocks3, 2, 2), [LocatorAndRange('a', 10, 2, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks3, 12, 2), [LocatorAndRange('b', 10, 2, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks3, 22, 2), [LocatorAndRange('c', 10, 2, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks3, 32, 2), [LocatorAndRange('d', 10, 2, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks3, 42, 2), [LocatorAndRange('e', 10, 2, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks3, 52, 2), [LocatorAndRange('f', 10, 2, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks3, 62, 2), [LocatorAndRange('g', 10, 2, 2)])
+
+
+ blocks = [Range('a', 0, 10),
+ Range('b', 10, 15),
+ Range('c', 25, 5)]
self.assertEqual(arvados.locators_and_ranges(blocks, 1, 0), [])
- self.assertEqual(arvados.locators_and_ranges(blocks, 0, 5), [['a', 10, 0, 5]])
- self.assertEqual(arvados.locators_and_ranges(blocks, 3, 5), [['a', 10, 3, 5]])
- self.assertEqual(arvados.locators_and_ranges(blocks, 0, 10), [['a', 10, 0, 10]])
-
- self.assertEqual(arvados.locators_and_ranges(blocks, 0, 11), [['a', 10, 0, 10],
- ['b', 15, 0, 1]])
- self.assertEqual(arvados.locators_and_ranges(blocks, 1, 11), [['a', 10, 1, 9],
- ['b', 15, 0, 2]])
- self.assertEqual(arvados.locators_and_ranges(blocks, 0, 25), [['a', 10, 0, 10],
- ['b', 15, 0, 15]])
-
- self.assertEqual(arvados.locators_and_ranges(blocks, 0, 30), [['a', 10, 0, 10],
- ['b', 15, 0, 15],
- ['c', 5, 0, 5]])
- self.assertEqual(arvados.locators_and_ranges(blocks, 1, 30), [['a', 10, 1, 9],
- ['b', 15, 0, 15],
- ['c', 5, 0, 5]])
- self.assertEqual(arvados.locators_and_ranges(blocks, 0, 31), [['a', 10, 0, 10],
- ['b', 15, 0, 15],
- ['c', 5, 0, 5]])
-
- self.assertEqual(arvados.locators_and_ranges(blocks, 15, 5), [['b', 15, 5, 5]])
-
- self.assertEqual(arvados.locators_and_ranges(blocks, 8, 17), [['a', 10, 8, 2],
- ['b', 15, 0, 15]])
-
- self.assertEqual(arvados.locators_and_ranges(blocks, 8, 20), [['a', 10, 8, 2],
- ['b', 15, 0, 15],
- ['c', 5, 0, 3]])
-
- self.assertEqual(arvados.locators_and_ranges(blocks, 26, 2), [['c', 5, 1, 2]])
-
- self.assertEqual(arvados.locators_and_ranges(blocks, 9, 15), [['a', 10, 9, 1],
- ['b', 15, 0, 14]])
- self.assertEqual(arvados.locators_and_ranges(blocks, 10, 15), [['b', 15, 0, 15]])
- self.assertEqual(arvados.locators_and_ranges(blocks, 11, 15), [['b', 15, 1, 14],
- ['c', 5, 0, 1]])
+ self.assertEqual(arvados.locators_and_ranges(blocks, 0, 5), [LocatorAndRange('a', 10, 0, 5)])
+ self.assertEqual(arvados.locators_and_ranges(blocks, 3, 5), [LocatorAndRange('a', 10, 3, 5)])
+ self.assertEqual(arvados.locators_and_ranges(blocks, 0, 10), [LocatorAndRange('a', 10, 0, 10)])
+
+ self.assertEqual(arvados.locators_and_ranges(blocks, 0, 11), [LocatorAndRange('a', 10, 0, 10),
+ LocatorAndRange('b', 15, 0, 1)])
+ self.assertEqual(arvados.locators_and_ranges(blocks, 1, 11), [LocatorAndRange('a', 10, 1, 9),
+ LocatorAndRange('b', 15, 0, 2)])
+ self.assertEqual(arvados.locators_and_ranges(blocks, 0, 25), [LocatorAndRange('a', 10, 0, 10),
+ LocatorAndRange('b', 15, 0, 15)])
+
+ self.assertEqual(arvados.locators_and_ranges(blocks, 0, 30), [LocatorAndRange('a', 10, 0, 10),
+ LocatorAndRange('b', 15, 0, 15),
+ LocatorAndRange('c', 5, 0, 5)])
+ self.assertEqual(arvados.locators_and_ranges(blocks, 1, 30), [LocatorAndRange('a', 10, 1, 9),
+ LocatorAndRange('b', 15, 0, 15),
+ LocatorAndRange('c', 5, 0, 5)])
+ self.assertEqual(arvados.locators_and_ranges(blocks, 0, 31), [LocatorAndRange('a', 10, 0, 10),
+ LocatorAndRange('b', 15, 0, 15),
+ LocatorAndRange('c', 5, 0, 5)])
+
+ self.assertEqual(arvados.locators_and_ranges(blocks, 15, 5), [LocatorAndRange('b', 15, 5, 5)])
+
+ self.assertEqual(arvados.locators_and_ranges(blocks, 8, 17), [LocatorAndRange('a', 10, 8, 2),
+ LocatorAndRange('b', 15, 0, 15)])
+
+ self.assertEqual(arvados.locators_and_ranges(blocks, 8, 20), [LocatorAndRange('a', 10, 8, 2),
+ LocatorAndRange('b', 15, 0, 15),
+ LocatorAndRange('c', 5, 0, 3)])
+
+ self.assertEqual(arvados.locators_and_ranges(blocks, 26, 2), [LocatorAndRange('c', 5, 1, 2)])
+
+ self.assertEqual(arvados.locators_and_ranges(blocks, 9, 15), [LocatorAndRange('a', 10, 9, 1),
+ LocatorAndRange('b', 15, 0, 14)])
+ self.assertEqual(arvados.locators_and_ranges(blocks, 10, 15), [LocatorAndRange('b', 15, 0, 15)])
+ self.assertEqual(arvados.locators_and_ranges(blocks, 11, 15), [LocatorAndRange('b', 15, 1, 14),
+ LocatorAndRange('c', 5, 0, 1)])
class MockKeep(object):
def __init__(self, content, num_retries=0):
with self.make_test_file() as testfile:
cwriter.write_file(testfile.name, 'test')
resumed = TestResumableWriter.from_state(cwriter.current_state())
- self.assertEquals(cwriter.manifest_text(), resumed.manifest_text(),
+ self.assertEqual(cwriter.manifest_text(), resumed.manifest_text(),
"resumed CollectionWriter had different manifest")
def test_resume_fails_when_missing_dependency(self):
def test_uuid_init_failure_raises_api_error(self):
client = self.api_client_mock(500)
- reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
with self.assertRaises(arvados.errors.ApiError):
- reader.manifest_text()
+ reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
def test_locator_init(self):
client = self.api_client_mock(200)
def test_uuid_init_no_fallback_to_keep(self):
# Do not look up a collection UUID in Keep.
client = self.api_client_mock(404)
- reader = arvados.CollectionReader(self.DEFAULT_UUID,
- api_client=client)
with tutil.mock_get_responses(self.DEFAULT_MANIFEST, 200):
with self.assertRaises(arvados.errors.ApiError):
- reader.manifest_text()
+ reader = arvados.CollectionReader(self.DEFAULT_UUID,
+ api_client=client)
def test_try_keep_first_if_permission_hint(self):
# To verify that CollectionReader tries Keep first here, we
def check_open_file(self, coll_file, stream_name, file_name, file_size):
self.assertFalse(coll_file.closed, "returned file is not open")
self.assertEqual(stream_name, coll_file.stream_name())
- self.assertEqual(file_name, coll_file.name())
+ self.assertEqual(file_name, coll_file.name)
self.assertEqual(file_size, coll_file.size())
def test_open_collection_file_one_argument(self):
cfile = reader.open('./foo')
self.check_open_file(cfile, '.', 'foo', 3)
- def test_open_collection_file_two_arguments(self):
- client = self.api_client_mock(200)
- reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
- cfile = reader.open('.', 'foo')
- self.check_open_file(cfile, '.', 'foo', 3)
-
def test_open_deep_file(self):
coll_name = 'collection_with_files_in_subdir'
client = self.api_client_mock(200)
def test_open_nonexistent_stream(self):
client = self.api_client_mock(200)
reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
- self.assertRaises(ValueError, reader.open, './nonexistent', 'foo')
+ self.assertRaises(IOError, reader.open, './nonexistent/foo')
def test_open_nonexistent_file(self):
client = self.api_client_mock(200)
reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
- self.assertRaises(ValueError, reader.open, '.', 'nonexistent')
+ self.assertRaises(IOError, reader.open, 'nonexistent')
@tutil.skip_sleep
def test_write_insufficient_replicas_via_disks(self):
client = mock.MagicMock(name='api_client')
- self.mock_keep_services(client, status=200, service_type='disk', count=2)
- writer = self.foo_writer(api_client=client, replication=3)
with self.mock_keep(
None, 200, 200,
**{'x-keep-replicas-stored': 1}) as keepmock:
+ self.mock_keep_services(client, status=200, service_type='disk', count=2)
+ writer = self.foo_writer(api_client=client, replication=3)
with self.assertRaises(arvados.errors.KeepWriteError):
writer.manifest_text()
def test_write_three_replicas(self):
client = mock.MagicMock(name='api_client')
- self.mock_keep_services(client, status=200, service_type='disk', count=6)
- writer = self.foo_writer(api_client=client, replication=3)
with self.mock_keep(
None, 500, 500, 500, 200, 200, 200,
**{'x-keep-replicas-stored': 1}) as keepmock:
+ self.mock_keep_services(client, status=200, service_type='disk', count=6)
+ writer = self.foo_writer(api_client=client, replication=3)
writer.manifest_text()
- self.assertEqual(6, keepmock.call_count)
+ # keepmock is the mock session constructor; keepmock.return_value
+ # is the mock session object, and keepmock.return_value.put is the
+ # actual mock method of interest.
+ self.assertEqual(6, keepmock.return_value.put.call_count)
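+        # (In general with mock.patch, e.g.
+        #    with mock.patch('requests.Session') as m:
+        #        requests.Session().put(...)
+        # the call is recorded on m.return_value.put; whether tutil's
+        # mock_keep patches requests.Session specifically is an assumption.)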
def test_write_whole_collection_through_retries(self):
writer = self.foo_writer(num_retries=2)
def test_open_flush(self):
client = self.api_client_mock()
- writer = arvados.CollectionWriter(client)
- with writer.open('flush_test') as out_file:
- out_file.write('flush1')
- data_loc1 = hashlib.md5('flush1').hexdigest() + '+6'
- with self.mock_keep(data_loc1, 200) as keep_mock:
+ data_loc1 = hashlib.md5('flush1').hexdigest() + '+6'
+ data_loc2 = hashlib.md5('flush2').hexdigest() + '+6'
+ with self.mock_keep((data_loc1, 200), (data_loc2, 200)) as keep_mock:
+ writer = arvados.CollectionWriter(client)
+ with writer.open('flush_test') as out_file:
+ out_file.write('flush1')
out_file.flush()
- out_file.write('flush2')
- data_loc2 = hashlib.md5('flush2').hexdigest() + '+6'
- with self.mock_keep(data_loc2, 200) as keep_mock:
+ out_file.write('flush2')
self.assertEqual(". {} {} 0:12:flush_test\n".format(data_loc1,
data_loc2),
writer.manifest_text())
def test_two_opens_two_streams(self):
client = self.api_client_mock()
- writer = arvados.CollectionWriter(client)
- with writer.open('file') as out_file:
- out_file.write('file')
- data_loc1 = hashlib.md5('file').hexdigest() + '+4'
- with self.mock_keep(data_loc1, 200) as keep_mock:
+ data_loc1 = hashlib.md5('file').hexdigest() + '+4'
+ data_loc2 = hashlib.md5('indir').hexdigest() + '+5'
+ with self.mock_keep((data_loc1, 200), (data_loc2, 200)) as keep_mock:
+ writer = arvados.CollectionWriter(client)
+ with writer.open('file') as out_file:
+ out_file.write('file')
with writer.open('./dir', 'indir') as out_file:
out_file.write('indir')
- data_loc2 = hashlib.md5('indir').hexdigest() + '+5'
- with self.mock_keep(data_loc2, 200) as keep_mock:
expected = ". {} 0:4:file\n./dir {} 0:5:indir\n".format(
data_loc1, data_loc2)
self.assertEqual(expected, writer.manifest_text())
self.assertRaises(arvados.errors.AssertionError, writer.open, 'two')
+class NewCollectionTestCase(unittest.TestCase, CollectionTestMixin):
+
+ def test_init_manifest(self):
+ m1 = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
+. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
+"""
+ self.assertEqual(m1, CollectionReader(m1).manifest_text(normalize=False))
+ self.assertEqual(". 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 0:127:md5sum.txt\n", CollectionReader(m1).manifest_text(normalize=True))
+
+ def test_init_manifest_with_collision(self):
+ m1 = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
+./md5sum.txt 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
+"""
+ with self.assertRaises(arvados.errors.ArgumentError):
+ self.assertEqual(m1, CollectionReader(m1))
+
+ def test_init_manifest_with_error(self):
+ m1 = """. 0:43:md5sum.txt"""
+ with self.assertRaises(arvados.errors.ArgumentError):
+ self.assertEqual(m1, CollectionReader(m1))
+
+ def test_remove(self):
+ c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n')
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n", c.manifest_text())
+ self.assertIn("count1.txt", c)
+ c.remove("count1.txt")
+ self.assertNotIn("count1.txt", c)
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n", c.manifest_text())
+
+ def test_remove_in_subdir(self):
+ c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+ c.remove("foo/count2.txt")
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n", c.manifest_text())
+
+ def test_remove_empty_subdir(self):
+ c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+ c.remove("foo/count2.txt")
+ c.remove("foo")
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n", c.manifest_text())
+
+ def test_remove_nonempty_subdir(self):
+ c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+ with self.assertRaises(IOError):
+ c.remove("foo")
+ c.remove("foo", recursive=True)
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n", c.manifest_text())
+
+ def test_copy_to_file_in_dir(self):
+ c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+ c.copy("count1.txt", "foo/count2.txt")
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n", c.manifest_text())
+
+ def test_copy_file(self):
+ c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+ c.copy("count1.txt", "count2.txt")
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n", c.manifest_text())
+
+ def test_copy_to_existing_dir(self):
+ c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+ c.copy("count1.txt", "foo")
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n", c.manifest_text())
+
+ def test_copy_to_new_dir(self):
+ c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+ c.copy("count1.txt", "foo/")
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n", c.manifest_text())
+
+ def test_clone(self):
+ c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+ cl = c.clone()
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n", cl.manifest_text())
+
+ def test_diff_del_add(self):
+ c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+ c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\n')
+ d = c2.diff(c1)
+ self.assertEqual(d, [('del', './count2.txt', c2["count2.txt"]),
+ ('add', './count1.txt', c1["count1.txt"])])
+ d = c1.diff(c2)
+ self.assertEqual(d, [('del', './count1.txt', c1["count1.txt"]),
+ ('add', './count2.txt', c2["count2.txt"])])
+ self.assertNotEqual(c1.manifest_text(), c2.manifest_text())
+ c1.apply(d)
+ self.assertEqual(c1.manifest_text(), c2.manifest_text())
+
+ def test_diff_same(self):
+ c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+ c2 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+ d = c2.diff(c1)
+ self.assertEqual(d, [])
+ d = c1.diff(c2)
+ self.assertEqual(d, [])
+
+ self.assertEqual(c1.manifest_text(), c2.manifest_text())
+ c1.apply(d)
+ self.assertEqual(c1.manifest_text(), c2.manifest_text())
+
+ def test_diff_mod(self):
+ c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+ c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt\n')
+ d = c2.diff(c1)
+ self.assertEqual(d, [('mod', './count1.txt', c2["count1.txt"], c1["count1.txt"])])
+ d = c1.diff(c2)
+ self.assertEqual(d, [('mod', './count1.txt', c1["count1.txt"], c2["count1.txt"])])
+
+ self.assertNotEqual(c1.manifest_text(), c2.manifest_text())
+ c1.apply(d)
+ self.assertEqual(c1.manifest_text(), c2.manifest_text())
+
+ def test_diff_add(self):
+ c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+ c2 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt 10:20:count2.txt\n')
+ d = c2.diff(c1)
+ self.assertEqual(d, [('del', './count2.txt', c2["count2.txt"])])
+ d = c1.diff(c2)
+ self.assertEqual(d, [('add', './count2.txt', c2["count2.txt"])])
+
+ self.assertNotEqual(c1.manifest_text(), c2.manifest_text())
+ c1.apply(d)
+ self.assertEqual(c1.manifest_text(), c2.manifest_text())
+
+ def test_diff_add_in_subcollection(self):
+ c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+ c2 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\n')
+ d = c2.diff(c1)
+ self.assertEqual(d, [('del', './foo', c2["foo"])])
+ d = c1.diff(c2)
+ self.assertEqual(d, [('add', './foo', c2["foo"])])
+
+ self.assertNotEqual(c1.manifest_text(), c2.manifest_text())
+ c1.apply(d)
+ self.assertEqual(c1.manifest_text(), c2.manifest_text())
+
+ def test_diff_del_add_in_subcollection(self):
+ c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\n')
+ c2 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:3:count3.txt\n')
+
+ d = c2.diff(c1)
+ self.assertEqual(d, [('del', './foo/count3.txt', c2.find("foo/count3.txt")),
+ ('add', './foo/count2.txt', c1.find("foo/count2.txt"))])
+ d = c1.diff(c2)
+ self.assertEqual(d, [('del', './foo/count2.txt', c1.find("foo/count2.txt")),
+ ('add', './foo/count3.txt', c2.find("foo/count3.txt"))])
+
+ self.assertNotEqual(c1.manifest_text(), c2.manifest_text())
+ c1.apply(d)
+ self.assertEqual(c1.manifest_text(), c2.manifest_text())
+
+ def test_diff_mod_in_subcollection(self):
+ c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\n')
+ c2 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:3:foo\n')
+ d = c2.diff(c1)
+ self.assertEqual(d, [('mod', './foo', c2["foo"], c1["foo"])])
+ d = c1.diff(c2)
+ self.assertEqual(d, [('mod', './foo', c1["foo"], c2["foo"])])
+
+ self.assertNotEqual(c1.manifest_text(), c2.manifest_text())
+ c1.apply(d)
+ self.assertEqual(c1.manifest_text(), c2.manifest_text())
+
+ def test_conflict_keep_local_change(self):
+ c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n')
+ c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\n')
+ d = c1.diff(c2)
+ self.assertEqual(d, [('del', './count1.txt', c1["count1.txt"]),
+ ('add', './count2.txt', c2["count2.txt"])])
+ with c1.open("count1.txt", "w") as f:
+ f.write("zzzzz")
+
+        # c1 changed count1.txt, so apply() should not delete it.
+ c1.apply(d)
+ self.assertEqual(c1.manifest_text(), ". 95ebc3c7b3b9f1d2c40fec14415d3cb8+5 5348b82a029fd9e971a811ce1f71360b+43 0:5:count1.txt 5:10:count2.txt\n")
+
+ def test_conflict_mod(self):
+ c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt')
+ c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt')
+ d = c1.diff(c2)
+ self.assertEqual(d, [('mod', './count1.txt', c1["count1.txt"], c2["count1.txt"])])
+ with c1.open("count1.txt", "w") as f:
+ f.write("zzzzz")
+
+        # c1 changed count1.txt, so the mod from c2 goes to a conflict file.
+ c1.apply(d)
+ self.assertRegexpMatches(c1.manifest_text(), r"\. 95ebc3c7b3b9f1d2c40fec14415d3cb8\+5 5348b82a029fd9e971a811ce1f71360b\+43 0:5:count1\.txt 5:10:count1\.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$")
+
+ def test_conflict_add(self):
+ c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
+ c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt\n')
+ d = c1.diff(c2)
+ self.assertEqual(d, [('del', './count2.txt', c1["count2.txt"]),
+ ('add', './count1.txt', c2["count1.txt"])])
+ with c1.open("count1.txt", "w") as f:
+ f.write("zzzzz")
+
+        # c1 added count1.txt, so the add from c2 goes to a conflict file.
+ c1.apply(d)
+ self.assertRegexpMatches(c1.manifest_text(), r"\. 95ebc3c7b3b9f1d2c40fec14415d3cb8\+5 5348b82a029fd9e971a811ce1f71360b\+43 0:5:count1\.txt 5:10:count1\.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$")
+
+ def test_conflict_del(self):
+ c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt')
+ c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt')
+ d = c1.diff(c2)
+ self.assertEqual(d, [('mod', './count1.txt', c1["count1.txt"], c2["count1.txt"])])
+ c1.remove("count1.txt")
+
+        # c1 deleted count1.txt, so the mod from c2 goes to a conflict file.
+ c1.apply(d)
+ self.assertRegexpMatches(c1.manifest_text(), r"\. 5348b82a029fd9e971a811ce1f71360b\+43 0:10:count1\.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$")
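+
+    # Common thread of the three conflict tests above: when apply() finds
+    # the local side has changed or removed a path the diff also touches,
+    # the incoming version is preserved as "<name>~conflict-<timestamp>~"
+    # rather than clobbering the local edit, hence the timestamped regexes.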
+
+ def test_notify(self):
+ c1 = Collection()
+ events = []
+ c1.subscribe(lambda event, collection, name, item: events.append((event, collection, name, item)))
+ f = c1.open("foo.txt", "w")
+ self.assertEqual(events[0], (arvados.collection.ADD, c1, "foo.txt", f.arvadosfile))
+
+ def test_open_w(self):
+ c1 = Collection(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n")
+ self.assertEqual(c1["count1.txt"].size(), 10)
+ c1.open("count1.txt", "w").close()
+ self.assertEqual(c1["count1.txt"].size(), 0)
+
+
+class CollectionCreateUpdateTest(run_test_server.TestCaseWithServers):
+ MAIN_SERVER = {}
+ KEEP_SERVER = {}
+
+ def create_count_txt(self):
+ # Create an empty collection, save it to the API server, then write a
+ # file, but don't save it.
+
+ c = Collection()
+ c.save_new("CollectionCreateUpdateTest", ensure_unique_name=True)
+ self.assertEqual(c.portable_data_hash(), "d41d8cd98f00b204e9800998ecf8427e+0")
+ self.assertEqual(c.api_response()["portable_data_hash"], "d41d8cd98f00b204e9800998ecf8427e+0" )
+
+ with c.open("count.txt", "w") as f:
+ f.write("0123456789")
+
+ self.assertEqual(c.manifest_text(), ". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n")
+
+ return c
+
+ def test_create_and_save(self):
+ c = self.create_count_txt()
+ c.save()
+ self.assertRegexpMatches(c.manifest_text(), r"^\. 781e5e245d69b566979b86e28d23f2c7\+10\+A[a-f0-9]{40}@[a-f0-9]{8} 0:10:count\.txt$",)
+
+
+ def test_create_and_save_new(self):
+ c = self.create_count_txt()
+ c.save_new()
+ self.assertRegexpMatches(c.manifest_text(), r"^\. 781e5e245d69b566979b86e28d23f2c7\+10\+A[a-f0-9]{40}@[a-f0-9]{8} 0:10:count\.txt$",)
+
+ def test_create_diff_apply(self):
+ c1 = self.create_count_txt()
+ c1.save()
+
+ c2 = Collection(c1._manifest_locator)
+ with c2.open("count.txt", "w") as f:
+ f.write("abcdefg")
+
+ diff = c1.diff(c2)
+
+ self.assertEqual(diff[0], (arvados.collection.MOD, u'./count.txt', c1["count.txt"], c2["count.txt"]))
+
+ c1.apply(diff)
+ self.assertEqual(c1.portable_data_hash(), c2.portable_data_hash())
+
+ def test_diff_apply_with_token(self):
+ baseline = CollectionReader(". 781e5e245d69b566979b86e28d23f2c7+10+A715fd31f8111894f717eb1003c1b0216799dd9ec@54f5dd1a 0:10:count.txt\n")
+ c = Collection(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n")
+ other = CollectionReader(". 7ac66c0f148de9519b8bd264312c4d64+7+A715fd31f8111894f717eb1003c1b0216799dd9ec@54f5dd1a 0:7:count.txt\n")
+
+ diff = baseline.diff(other)
+ self.assertEqual(diff, [('mod', u'./count.txt', c["count.txt"], other["count.txt"])])
+
+ c.apply(diff)
+
+ self.assertEqual(c.manifest_text(), ". 7ac66c0f148de9519b8bd264312c4d64+7+A715fd31f8111894f717eb1003c1b0216799dd9ec@54f5dd1a 0:7:count.txt\n")
+
+
+ def test_create_and_update(self):
+ c1 = self.create_count_txt()
+ c1.save()
+
+ c2 = arvados.collection.Collection(c1._manifest_locator)
+ with c2.open("count.txt", "w") as f:
+ f.write("abcdefg")
+
+ c2.save()
+
+ self.assertNotEqual(c1.portable_data_hash(), c2.portable_data_hash())
+ c1.update()
+ self.assertEqual(c1.portable_data_hash(), c2.portable_data_hash())
+
+
+ def test_create_and_update_with_conflict(self):
+ c1 = self.create_count_txt()
+ c1.save()
+
+ with c1.open("count.txt", "w") as f:
+ f.write("XYZ")
+
+ c2 = arvados.collection.Collection(c1._manifest_locator)
+ with c2.open("count.txt", "w") as f:
+ f.write("abcdefg")
+
+ c2.save()
+
+ c1.update()
+ self.assertRegexpMatches(c1.manifest_text(), r"\. e65075d550f9b5bf9992fa1d71a131be\+3 7ac66c0f148de9519b8bd264312c4d64\+7\+A[a-f0-9]{40}@[a-f0-9]{8} 0:3:count\.txt 3:7:count\.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$")
+
+
if __name__ == '__main__':
unittest.main()
def test_get_timeout(self):
api_client = self.mock_keep_services(count=1)
- keep_client = arvados.KeepClient(api_client=api_client)
force_timeout = [socket.timeout("timed out")]
- with mock.patch('requests.get', side_effect=force_timeout) as mock_request:
+ with tutil.mock_get(force_timeout) as mock_session:
+ keep_client = arvados.KeepClient(api_client=api_client)
with self.assertRaises(arvados.errors.KeepReadError):
keep_client.get('ffffffffffffffffffffffffffffffff')
- self.assertTrue(mock_request.called)
+ self.assertTrue(mock_session.return_value.get.called)
self.assertEqual(
arvados.KeepClient.DEFAULT_TIMEOUT,
- mock_request.call_args[1]['timeout'])
+ mock_session.return_value.get.call_args[1]['timeout'])
def test_put_timeout(self):
api_client = self.mock_keep_services(count=1)
- keep_client = arvados.KeepClient(api_client=api_client)
force_timeout = [socket.timeout("timed out")]
- with mock.patch('requests.put', side_effect=force_timeout) as mock_request:
+ with tutil.mock_put(force_timeout) as mock_session:
+ keep_client = arvados.KeepClient(api_client=api_client)
with self.assertRaises(arvados.errors.KeepWriteError):
keep_client.put('foo')
- self.assertTrue(mock_request.called)
+ self.assertTrue(mock_session.return_value.put.called)
self.assertEqual(
arvados.KeepClient.DEFAULT_TIMEOUT,
- mock_request.call_args[1]['timeout'])
+ mock_session.return_value.put.call_args[1]['timeout'])
def test_proxy_get_timeout(self):
# Force a timeout, verifying that the requests.get or
# requests.put method was called with the proxy_timeout
# setting rather than the default timeout.
api_client = self.mock_keep_services(service_type='proxy', count=1)
- keep_client = arvados.KeepClient(api_client=api_client)
force_timeout = [socket.timeout("timed out")]
- with mock.patch('requests.get', side_effect=force_timeout) as mock_request:
+ with tutil.mock_get(force_timeout) as mock_session:
+ keep_client = arvados.KeepClient(api_client=api_client)
with self.assertRaises(arvados.errors.KeepReadError):
keep_client.get('ffffffffffffffffffffffffffffffff')
- self.assertTrue(mock_request.called)
+ self.assertTrue(mock_session.return_value.get.called)
self.assertEqual(
arvados.KeepClient.DEFAULT_PROXY_TIMEOUT,
- mock_request.call_args[1]['timeout'])
+ mock_session.return_value.get.call_args[1]['timeout'])
def test_proxy_put_timeout(self):
# Force a timeout, verifying that the requests.get or
# requests.put method was called with the proxy_timeout
# setting rather than the default timeout.
api_client = self.mock_keep_services(service_type='proxy', count=1)
- keep_client = arvados.KeepClient(api_client=api_client)
force_timeout = [socket.timeout("timed out")]
- with mock.patch('requests.put', side_effect=force_timeout) as mock_request:
+ with tutil.mock_put(force_timeout) as mock_session:
+ keep_client = arvados.KeepClient(api_client=api_client)
with self.assertRaises(arvados.errors.KeepWriteError):
keep_client.put('foo')
- self.assertTrue(mock_request.called)
+ self.assertTrue(mock_session.return_value.put.called)
self.assertEqual(
arvados.KeepClient.DEFAULT_PROXY_TIMEOUT,
- mock_request.call_args[1]['timeout'])
+ mock_session.return_value.put.call_args[1]['timeout'])
def test_probe_order_reference_set(self):
# expected_order[i] is the probe order for
def check_errors_from_last_retry(self, verb, exc_class):
api_client = self.mock_keep_services(count=2)
- keep_client = arvados.KeepClient(api_client=api_client)
req_mock = getattr(tutil, 'mock_{}_responses'.format(verb))(
"retry error reporting test", 500, 500, 403, 403)
with req_mock, tutil.skip_sleep, \
self.assertRaises(exc_class) as err_check:
+ keep_client = arvados.KeepClient(api_client=api_client)
getattr(keep_client, verb)('d41d8cd98f00b204e9800998ecf8427e+0',
num_retries=3)
self.assertEqual([403, 403], [
data = 'partial failure test'
data_loc = '{}+{}'.format(hashlib.md5(data).hexdigest(), len(data))
api_client = self.mock_keep_services(count=3)
- keep_client = arvados.KeepClient(api_client=api_client)
with tutil.mock_put_responses(data_loc, 200, 500, 500) as req_mock, \
self.assertRaises(arvados.errors.KeepWriteError) as exc_check:
+ keep_client = arvados.KeepClient(api_client=api_client)
keep_client.put(data)
self.assertEqual(2, len(exc_check.exception.service_errors()))
self.check_success(locator=self.HINTED_LOCATOR)
def test_try_next_server_after_timeout(self):
- side_effects = [
- socket.timeout("timed out"),
- tutil.fake_requests_response(200, self.DEFAULT_EXPECT)]
- with mock.patch('requests.get',
- side_effect=iter(side_effects)):
+ with tutil.mock_get([
+ socket.timeout("timed out"),
+ tutil.fake_requests_response(200, self.DEFAULT_EXPECT)]):
self.check_success(locator=self.HINTED_LOCATOR)
def test_retry_data_with_wrong_checksum(self):
- side_effects = (tutil.fake_requests_response(200, s)
- for s in ['baddata', self.TEST_DATA])
- with mock.patch('requests.get', side_effect=side_effects):
+ with tutil.mock_get((tutil.fake_requests_response(200, s)
+ for s in ['baddata', self.TEST_DATA])):
self.check_success(locator=self.HINTED_LOCATOR)
(self.sizes(), self.perm_hints())]:
for loc_data in itertools.izip(self.checksums(), *hint_gens):
locator = '+'.join(loc_data)
- self.assertEquals(locator, str(KeepLocator(locator)))
+ self.assertEqual(locator, str(KeepLocator(locator)))
def test_nonchecksum_rejected(self):
for badstr in ['', 'badbadbad', '8f9e68d957b504a29ba76c526c3145dj',
base = next(self.base_locators(1))
for weirdhint in ['Zfoo', 'Ybar234', 'Xa@b_c-372', 'W99']:
locator = '+'.join([base, weirdhint])
- self.assertEquals(locator, str(KeepLocator(locator)))
+ self.assertEqual(locator, str(KeepLocator(locator)))
def test_bad_hints_rejected(self):
base = next(self.base_locators(1))
base = next(self.base_locators(1))
for loc_hints in itertools.permutations(['Kab1cd', 'Kef2gh', 'Kij3kl']):
locator = '+'.join((base,) + loc_hints)
- self.assertEquals(locator, str(KeepLocator(locator)))
+ self.assertEqual(locator, str(KeepLocator(locator)))
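The locators round-tripped in these tests follow the pattern checksum, +size, then optional hint fields that each start with an uppercase letter. A quick round-trip example built from values appearing in the hunks above (the import path is an assumption):

from arvados.keep import KeepLocator  # assumed import path

loc = 'd41d8cd98f00b204e9800998ecf8427e+0+Zfoo+Kab1cd'
assert str(KeepLocator(loc)) == loc   # parsing and str() round-trip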
def test_expiry_passed(self):
base = next(self.base_locators(1))
# it should now create only one job task and not three.
arvados.job_setup.one_task_per_input_file(and_end_task=False)
mock_api('v1').job_tasks().create().execute.assert_called_once_with()
-
import mock
import os
import unittest
+import hashlib
import arvados
from arvados import StreamReader, StreamFileReader
+from arvados._ranges import Range
import arvados_testutil as tutil
import run_test_server
class StreamFileReaderTestCase(unittest.TestCase):
def make_count_reader(self):
stream = tutil.MockStreamReader('.', '01234', '34567', '67890')
- return StreamFileReader(stream, [[1, 3, 0], [6, 3, 3], [11, 3, 6]],
+ return StreamFileReader(stream, [Range(1, 0, 3), Range(6, 3, 3), Range(11, 6, 3)],
'count.txt')
def test_read_returns_first_block(self):
def make_newlines_reader(self):
stream = tutil.MockStreamReader('.', 'one\ntwo\n\nth', 'ree\nfour\n\n')
- return StreamFileReader(stream, [[0, 11, 0], [11, 10, 11]], 'count.txt')
+ return StreamFileReader(stream, [Range(0, 0, 11), Range(11, 11, 10)], 'count.txt')
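The Range conversions above show the argument-order change: an old positional list [locator, range_size, range_start] becomes Range(locator, range_start, range_size), i.e. the last two fields swap places. For example (attribute names assumed from arvados._ranges):

from arvados._ranges import Range

r = Range(11, 6, 3)         # was [11, 3, 6] in the old list form
assert r.range_start == 6   # offset of the segment within the file
assert r.range_size == 3    # length of the segment in bytes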
def check_lines(self, actual):
self.assertEqual(['one\n', 'two\n', '\n', 'three\n', 'four\n', '\n'],
def test_name_attribute(self):
# Test both .name and .name() (for backward compatibility)
stream = tutil.MockStreamReader()
- sfile = StreamFileReader(stream, [[0, 0, 0]], 'nametest')
+ sfile = StreamFileReader(stream, [Range(0, 0, 0)], 'nametest')
self.assertEqual('nametest', sfile.name)
self.assertEqual('nametest', sfile.name())
test_text = 'decompression\ntest\n'
test_data = compress_func(test_text)
stream = tutil.MockStreamReader('.', test_data)
- reader = StreamFileReader(stream, [[0, len(test_data), 0]],
+ reader = StreamFileReader(stream, [Range(0, 0, len(test_data))],
'test.' + compress_ext)
self.assertEqual(test_text, ''.join(reader.readall_decompressed()))
@tutil.skip_sleep
def test_success_without_retries(self):
- reader = self.reader_for('bar_file')
with tutil.mock_get_responses('bar', 200):
+ reader = self.reader_for('bar_file')
self.assertEqual('bar', self.read_for_test(reader, 3))
@tutil.skip_sleep
def test_read_no_default_retry(self):
- reader = self.reader_for('user_agreement')
with tutil.mock_get_responses('', 500):
+ reader = self.reader_for('user_agreement')
with self.assertRaises(arvados.errors.KeepReadError):
self.read_for_test(reader, 10)
@tutil.skip_sleep
def test_read_with_instance_retries(self):
- reader = self.reader_for('foo_file', num_retries=3)
with tutil.mock_get_responses('foo', 500, 200):
+ reader = self.reader_for('foo_file', num_retries=3)
self.assertEqual('foo', self.read_for_test(reader, 3))
@tutil.skip_sleep
def test_read_with_method_retries(self):
- reader = self.reader_for('foo_file')
with tutil.mock_get_responses('foo', 500, 200):
+ reader = self.reader_for('foo_file')
self.assertEqual('foo',
self.read_for_test(reader, 3, num_retries=3))
@tutil.skip_sleep
def test_read_instance_retries_exhausted(self):
- reader = self.reader_for('bar_file', num_retries=3)
with tutil.mock_get_responses('bar', 500, 500, 500, 500, 200):
+ reader = self.reader_for('bar_file', num_retries=3)
with self.assertRaises(arvados.errors.KeepReadError):
self.read_for_test(reader, 3)
@tutil.skip_sleep
def test_read_method_retries_exhausted(self):
- reader = self.reader_for('bar_file')
with tutil.mock_get_responses('bar', 500, 500, 500, 500, 200):
+ reader = self.reader_for('bar_file')
with self.assertRaises(arvados.errors.KeepReadError):
self.read_for_test(reader, 3, num_retries=3)
@tutil.skip_sleep
def test_method_retries_take_precedence(self):
- reader = self.reader_for('user_agreement', num_retries=10)
with tutil.mock_get_responses('', 500, 500, 500, 200):
+ reader = self.reader_for('user_agreement', num_retries=10)
with self.assertRaises(arvados.errors.KeepReadError):
self.read_for_test(reader, 10, num_retries=1)
def read_for_test(self, reader, byte_count, **kwargs):
return ''.join(reader.readlines(**kwargs))
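Taken together, these retry tests also pin down the precedence rule: a num_retries passed to the read call overrides the value given when the reader was constructed. A sketch using the tests' own reader_for helper and the 500/500/500/200 response queue from the last hunk:

reader = self.reader_for('user_agreement', num_retries=10)  # instance default
# Call-level num_retries=1 wins: both attempts see only 500s, so the
# final 200 in the mock queue is never reached and KeepReadError is raised.
reader.readlines(num_retries=1)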
-
if __name__ == '__main__':
unittest.main()
# appear as underscores in the fuse mount.)
_disallowed_filename_characters = re.compile('[\x00/]')
-class SafeApi(object):
- """Threadsafe wrapper for API object.
-
- This stores and returns a different api object per thread, because
- httplib2 which underlies apiclient is not threadsafe.
- """
-
- def __init__(self, config):
- self.host = config.get('ARVADOS_API_HOST')
- self.api_token = config.get('ARVADOS_API_TOKEN')
- self.insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE')
- self.local = threading.local()
- self.block_cache = arvados.KeepBlockCache()
-
- def localapi(self):
- if 'api' not in self.local.__dict__:
- self.local.api = arvados.api(
- version='v1',
- host=self.host, token=self.api_token, insecure=self.insecure)
- return self.local.api
-
- def localkeep(self):
- if 'keep' not in self.local.__dict__:
- self.local.keep = arvados.KeepClient(api_client=self.localapi(), block_cache=self.block_cache)
- return self.local.keep
-
- def __getattr__(self, name):
- # Proxy nonexistent attributes to the local API client.
- try:
- return getattr(self.localapi(), name)
- except AttributeError:
- return super(SafeApi, self).__getattr__(name)
-
-
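The removed SafeApi wrapper is superseded by arvados.safeapi.ThreadSafeApiCache, which the arv-mount hunks below switch to. Both rest on the idea the old docstring spells out: keep one API client per thread, because httplib2 (which underlies apiclient) is not threadsafe. A minimal sketch of that pattern, with hypothetical names:

import threading

class PerThreadClient(object):  # hypothetical name, not the SDK class
    def __init__(self, factory):
        self._factory = factory          # callable building a fresh client
        self._local = threading.local()  # one storage slot per thread

    def client(self):
        if not hasattr(self._local, 'client'):
            self._local.client = self._factory()
        return self._local.client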
def convertTime(t):
"""Parse Arvados timestamp to unix time."""
if not t:
with llfuse.lock_released:
coll_reader = arvados.CollectionReader(
- self.collection_locator, self.api, self.api.localkeep(),
+ self.collection_locator, self.api, self.api.keep,
num_retries=self.num_retries)
new_collection_object = coll_reader.api_response() or {}
# If the Collection only exists in Keep, there will be no API
# arv-mount.
# The workaround is to implement it with the proper number of parameters,
# and then everything works out.
- def create(self, p1, p2, p3, p4, p5):
+ def create(self, inode_parent, name, mode, flags, ctx):
raise llfuse.FUSEError(errno.EROFS)
import arvados.commands._util as arv_cmd
from arvados_fuse import *
+from arvados.safeapi import ThreadSafeApiCache
logger = logging.getLogger('arvados.arv-mount')
try:
# Create the request handler
operations = Operations(os.getuid(), os.getgid(), args.encoding)
- api = SafeApi(arvados.config)
+ api = ThreadSafeApiCache(arvados.config)
usr = api.users().current().execute(num_retries=args.retries)
now = time.time()
self.mounttmp = tempfile.mkdtemp()
run_test_server.run()
run_test_server.authorize_with("admin")
- self.api = fuse.SafeApi(arvados.config)
+ self.api = arvados.safeapi.ThreadSafeApiCache(arvados.config.settings())
def make_mount(self, root_class, **root_kwargs):
operations = fuse.Operations(os.getuid(), os.getgid())