Merge branch 'master' into 15803-unsetup
diff --git a/sdk/python/arvados/stream.py b/sdk/python/arvados/stream.py
index 3843411771f3546dff950789306f1b9bb89ac10f..edfb7711b829a100688f82bff203ebfec986096d 100644
-import gflags
-import httplib
-import httplib2
-import logging
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+from __future__ import absolute_import
+from future.utils import listvalues
+from builtins import object
+import collections
+import hashlib
 import os
-import pprint
-import sys
-import types
-import subprocess
-import json
-import UserDict
 import re
-import hashlib
-import string
-import bz2
-import zlib
-import fcntl
-import time
 import threading
+import functools
+import copy
 
-from keep import *
-import config
-import errors
-
-LOCATOR = 0
-BLOCKSIZE = 1
-OFFSET = 2
-SEGMENTSIZE = 3
-
-def locators_and_ranges(data_locators, range_start, range_size):
-    '''returns list of [block locator, blocksize, segment offset, segment size] that satisfies the range'''
-    resp = []
-    range_start = long(range_start)
-    range_size = long(range_size)
-    range_end = range_start + range_size
-    block_start = 0L
-    for locator, block_size, block_start in data_locators:
-        block_end = block_start + block_size
-        if range_end < block_start:
-            # range ends before this block starts, so don't look at any more locators
-            break
-        if range_start > block_end:
-            # range starts after this block ends, so go to next block
-            next
-        elif range_start >= block_start and range_end <= block_end:
-            # range starts and ends in this block
-            resp.append([locator, block_size, range_start - block_start, range_size])
-        elif range_start >= block_start:
-            # range starts in this block
-            resp.append([locator, block_size, range_start - block_start, block_end - range_start])
-        elif range_start < block_start and range_end > block_end:
-            # range starts in a previous block and extends to further blocks
-            resp.append([locator, block_size, 0L, block_size])
-        elif range_start < block_start and range_end <= block_end:
-            # range starts in a previous block and ends in this block
-            resp.append([locator, block_size, 0L, range_end - block_start])
-        block_start = block_end
-    return resp
-
-
-class StreamFileReader(object):
-    def __init__(self, stream, segments, name):
-        self._stream = stream
-        self.segments = segments
-        self._name = name
-        self._filepos = 0L
-
-    def name(self):
-        return self._name
-
-    def decompressed_name(self):
-        return re.sub('\.(bz2|gz)$', '', self._name)
-
-    def stream_name(self):
-        return self._stream.name()
-
-    def seek(self, pos):
-        self._filepos = min(max(pos, 0L), self.size())
-
-    def tell(self, pos):
-        return self._filepos
-
-    def size(self):
-        n = self.segments[-1]
-        return n[OFFSET] + n[BLOCKSIZE]
-
-    def read(self, size):
-        """Read up to 'size' bytes from the stream, starting at the current file position"""
-        if size == 0:
-            return ''
-
-        data = ''
-        for locator, blocksize, segmentoffset, segmentsize in locators_and_ranges(self.segments, self._filepos, size):
-            self._stream.seek(locator+segmentoffset)
-            data += self._stream.read(segmentsize)
-            self._filepos += len(data)
-        return data
-
-    def readall(self, size=2**20):
-        while True:
-            data = self.read(size)
-            if data == '':
-                break
-            yield data
-
-    def bunzip2(self, size):
-        decompressor = bz2.BZ2Decompressor()
-        for segment in self.readall(size):
-            data = decompressor.decompress(segment)
-            if data and data != '':
-                yield data
-
-    def gunzip(self, size):
-        decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
-        for segment in self.readall(size):
-            data = decompressor.decompress(decompressor.unconsumed_tail + segment)
-            if data and data != '':
-                yield data
-
-    def readall_decompressed(self, size=2**20):
-        self.seek(0)
-        if re.search('\.bz2$', self._name):
-            return self.bunzip2(size)
-        elif re.search('\.gz$', self._name):
-            return self.gunzip(size)
-        else:
-            return self.readall(size)
-
-    def readlines(self, decompress=True):
-        if decompress:
-            datasource = self.readall_decompressed()
-        else:
-            self._stream.seek(self._pos + self._filepos)
-            datasource = self.readall()
-        data = ''
-        for newdata in datasource:
-            data += newdata
-            sol = 0
-            while True:
-                eol = string.find(data, "\n", sol)
-                if eol < 0:
-                    break
-                yield data[sol:eol+1]
-                sol = eol+1
-            data = data[sol:]
-        if data != '':
-            yield data
-
+from ._ranges import locators_and_ranges, Range
+from .arvfile import StreamFileReader
+from arvados.retry import retry_method
+from arvados.keep import *
+from . import config
+from . import errors
+from ._normalize_stream import normalize_stream
 
 class StreamReader(object):
-    def __init__(self, tokens):
-        self._tokens = tokens
-        self._pos = 0L
-
+    def __init__(self, tokens, keep=None, debug=False, _empty=False,
+                 num_retries=0):
         self._stream_name = None
-        self.data_locators = []
-        self.files = {}
+        self._data_locators = []
+        self._files = collections.OrderedDict()
+        self._keep = keep
+        self.num_retries = num_retries
 
-        streamoffset = 0L
+        streamoffset = 0
 
-        for tok in self._tokens:
-            if self._stream_name == None:
+        # Parse the manifest tokens: the stream name comes first, followed
+        # by block locators and file segment descriptors.
+        for tok in tokens:
+            if debug:
+                print('tok', tok)
+            if self._stream_name is None:
                 self._stream_name = tok.replace('\\040', ' ')
                 continue
 
             s = re.match(r'^[0-9a-f]{32}\+(\d+)(\+\S+)*$', tok)
             if s:
-                blocksize = long(s.group(1))
-                self.data_locators.append([tok, blocksize, streamoffset])
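+                # Data block locator ("<md5>+<size>[+hints]"): record it as a
+                # Range starting at the current offset within the stream.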
+                blocksize = int(s.group(1))
+                self._data_locators.append(Range(tok, streamoffset, blocksize, 0))
                 streamoffset += blocksize
                 continue
 
             s = re.search(r'^(\d+):(\d+):(\S+)', tok)
             if s:
-                pos = long(s.group(1))
-                size = long(s.group(2))
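+                # File segment token ("<pos>:<size>:<name>"): 'size' bytes
+                # starting at offset 'pos' within this stream.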
+                pos = int(s.group(1))
+                size = int(s.group(2))
                 name = s.group(3).replace('\\040', ' ')
-                if name not in self.files:
-                    self.files[name] = StreamFileReader(self, [[pos, size, 0]], name)
+                if name not in self._files:
+                    self._files[name] = StreamFileReader(self, [Range(pos, 0, size, 0)], name)
                 else:
-                    n = self.files[name]
-                    n.segments.append([pos, size, n.size()])
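+                    # Another segment of a file we have already seen:
+                    # append it after the file's current end.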
+                    filereader = self._files[name]
+                    filereader.segments.append(Range(pos, filereader.size(), size))
                 continue
 
             raise errors.SyntaxError("Invalid manifest format")
-            
-    def tokens(self):
-        return self._tokens
 
     def name(self):
         return self._stream_name
 
-    def all_files(self):
-        return self.files.values()
-
-    def seek(self, pos):
-        """Set the position of the next read operation."""
-        self._pos = pos
+    def files(self):
+        return self._files
 
-    def tell(self):
-        return self._pos
+    def all_files(self):
+        return listvalues(self._files)
 
     def size(self):
-        n = self.data_locators[-1]
-        return n[self.OFFSET] + n[self.BLOCKSIZE]
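+        # The stream's size is the start of the last block plus its length.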
+        n = self._data_locators[-1]
+        return n.range_start + n.range_size
 
     def locators_and_ranges(self, range_start, range_size):
-        return locators_and_ranges(self.data_locators, range_start, range_size)
+        return locators_and_ranges(self._data_locators, range_start, range_size)
 
-    def read(self, size):
-        """Read up to 'size' bytes from the stream, starting at the current file position"""
+    @retry_method
+    def _keepget(self, locator, num_retries=None):
+        return self._keep.get(locator, num_retries=num_retries)
+
+    @retry_method
+    def readfrom(self, start, size, num_retries=None):
+        """Read up to 'size' bytes from the stream, starting at 'start'"""
         if size == 0:
-            return ''
-        data = ''
-        for locator, blocksize, segmentoffset, segmentsize in locators_and_ranges(self.data_locators, self._pos, size):
-            data += Keep.get(locator)[segmentoffset:segmentoffset+segmentsize]
-        self._pos += len(data)
-        return data
+            return b''
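+        # Create a Keep client lazily if the caller did not supply one.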
+        if self._keep is None:
+            self._keep = KeepClient(num_retries=self.num_retries)
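+        # Fetch each block overlapping [start, start+size) and keep only
+        # the slice of it that falls inside the requested range.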
+        data = []
+        for lr in locators_and_ranges(self._data_locators, start, size):
+            data.append(self._keepget(lr.locator, num_retries=num_retries)[lr.segment_offset:lr.segment_offset+lr.segment_size])
+        return b''.join(data)
+
+    def manifest_text(self, strip=False):
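+        # Rebuild this stream's manifest line; spaces in names are
+        # escaped as "\040" per the manifest format.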
+        manifest_text = [self.name().replace(' ', '\\040')]
+        if strip:
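+            # Strip permission hints and other suffixes, keeping only
+            # the bare "<md5>+<size>" part of each locator.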
+            for d in self._data_locators:
+                m = re.match(r'^[0-9a-f]{32}\+\d+', d.locator)
+                manifest_text.append(m.group(0))
+        else:
+            manifest_text.extend([d.locator for d in self._data_locators])
+        manifest_text.extend([' '.join(["{}:{}:{}".format(seg.locator, seg.range_size, f.name.replace(' ', '\\040'))
+                                        for seg in f.segments])
+                              for f in listvalues(self._files)])
+        return ' '.join(manifest_text) + '\n'
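
A minimal usage sketch of the StreamReader API as it looks after this change, assuming a single-stream manifest; the stream name, block locator, and file name below are placeholders, and actually reading data requires a configured Arvados environment (ARVADOS_API_HOST / ARVADOS_API_TOKEN) with a reachable Keep service:

from arvados.stream import StreamReader

# One manifest stream, already split into tokens: the stream name, one
# block locator (placeholder md5+size), and one file covering that block.
tokens = [
    "./example",
    "acbd18db4cc2f85cedef654fccc4a4d8+1024",
    "0:1024:hello.txt",
]

reader = StreamReader(tokens, num_retries=3)
print(reader.name())          # "./example"
print(reader.size())          # 1024
print(list(reader.files()))   # ['hello.txt']
print(reader.manifest_text(), end='')
# ./example acbd18db4cc2f85cedef654fccc4a4d8+1024 0:1024:hello.txt

# readfrom() contacts Keep; a KeepClient is created lazily on first use.
# data = reader.readfrom(0, 64)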