--- /dev/null
+import os
+import re
+
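+# Hedged sibling-module imports, assuming the package layout implied by
+# "from Collection import *" in __init__.py.  'errors' and 'util' are
+# also referenced below; they are assumed to be resolved through the
+# parent arvados package (importing arvados here would be circular).
+from Keep import Keep, EMPTY_BLOCK_LOCATOR
+from Stream import StreamReader
+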
+class CollectionReader(object):
+ def __init__(self, manifest_locator_or_text):
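+        # Distinguish manifest text from a manifest locator.  A
+        # manifest line is a stream name, one or more block locators,
+        # then one or more file tokens, e.g. (illustrative values):
+        #   . acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt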
+ if re.search(r'^\S+( [a-f0-9]{32,}(\+\S+)*)+( \d+:\d+:\S+)+\n', manifest_locator_or_text):
+ self._manifest_text = manifest_locator_or_text
+ self._manifest_locator = None
+ else:
+ self._manifest_locator = manifest_locator_or_text
+ self._manifest_text = None
+ self._streams = None
+    def __enter__(self):
+        return self
+    def __exit__(self, exc_type, exc_value, traceback):
+        pass
+ def _populate(self):
+        if self._streams is not None:
+ return
+ if not self._manifest_text:
+ self._manifest_text = Keep.get(self._manifest_locator)
+ self._streams = []
+ for stream_line in self._manifest_text.split("\n"):
+ if stream_line != '':
+ stream_tokens = stream_line.split()
+ self._streams += [stream_tokens]
+ def all_streams(self):
+ self._populate()
+ resp = []
+ for s in self._streams:
+ resp += [StreamReader(s)]
+ return resp
+ def all_files(self):
+ for s in self.all_streams():
+ for f in s.all_files():
+ yield f
+ def manifest_text(self):
+ self._populate()
+ return self._manifest_text
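+
+# Minimal read sketch (hedged; assumes Keep is reachable, or
+# KEEP_LOCAL_STORE is set, when a locator is given):
+#
+#   reader = CollectionReader(manifest_locator_or_text)
+#   for f in reader.all_files():
+#       print f.name(), f.size()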
+
+class CollectionWriter(object):
+ KEEP_BLOCK_SIZE = 2**26
+ def __init__(self):
+ self._data_buffer = []
+ self._data_buffer_len = 0
+ self._current_stream_files = []
+ self._current_stream_length = 0
+ self._current_stream_locators = []
+ self._current_stream_name = '.'
+ self._current_file_name = None
+ self._current_file_pos = 0
+ self._finished_streams = []
+    def __enter__(self):
+        return self
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.finish()
+ def write_directory_tree(self,
+ path, stream_name='.', max_manifest_depth=-1):
+ self.start_new_stream(stream_name)
+ todo = []
+ if max_manifest_depth == 0:
+ dirents = sorted(util.listdir_recursive(path))
+ else:
+ dirents = sorted(os.listdir(path))
+ for dirent in dirents:
+ target = os.path.join(path, dirent)
+ if os.path.isdir(target):
+ todo += [[target,
+ os.path.join(stream_name, dirent),
+ max_manifest_depth-1]]
+ else:
+ self.start_new_file(dirent)
+ with open(target, 'rb') as f:
+ while True:
+                        buf = f.read(self.KEEP_BLOCK_SIZE)
+ if len(buf) == 0:
+ break
+ self.write(buf)
+ self.finish_current_stream()
+        for args in todo:
+            self.write_directory_tree(*args)
+
+ def write(self, newdata):
+ if hasattr(newdata, '__iter__'):
+ for s in newdata:
+ self.write(s)
+ return
+ self._data_buffer += [newdata]
+ self._data_buffer_len += len(newdata)
+ self._current_stream_length += len(newdata)
+ while self._data_buffer_len >= self.KEEP_BLOCK_SIZE:
+ self.flush_data()
+ def flush_data(self):
+ data_buffer = ''.join(self._data_buffer)
+ if data_buffer != '':
+ self._current_stream_locators += [Keep.put(data_buffer[0:self.KEEP_BLOCK_SIZE])]
+ self._data_buffer = [data_buffer[self.KEEP_BLOCK_SIZE:]]
+ self._data_buffer_len = len(self._data_buffer[0])
+ def start_new_file(self, newfilename=None):
+ self.finish_current_file()
+ self.set_current_file_name(newfilename)
+ def set_current_file_name(self, newfilename):
+        if re.search(r'[\t\n]', newfilename):
+            raise errors.AssertionError(
+                "Manifest filenames cannot contain tab or newline characters: %s" %
+                newfilename)
+ self._current_file_name = newfilename
+ def current_file_name(self):
+ return self._current_file_name
+ def finish_current_file(self):
+        if self._current_file_name is None:
+ if self._current_file_pos == self._current_stream_length:
+ return
+ raise errors.AssertionError(
+ "Cannot finish an unnamed file " +
+ "(%d bytes at offset %d in '%s' stream)" %
+ (self._current_stream_length - self._current_file_pos,
+ self._current_file_pos,
+ self._current_stream_name))
+ self._current_stream_files += [[self._current_file_pos,
+ self._current_stream_length - self._current_file_pos,
+ self._current_file_name]]
+ self._current_file_pos = self._current_stream_length
+ def start_new_stream(self, newstreamname='.'):
+ self.finish_current_stream()
+ self.set_current_stream_name(newstreamname)
+ def set_current_stream_name(self, newstreamname):
+        if re.search(r'[\t\n]', newstreamname):
+            raise errors.AssertionError(
+                "Manifest stream names cannot contain tab or newline characters")
+ self._current_stream_name = '.' if newstreamname=='' else newstreamname
+ def current_stream_name(self):
+ return self._current_stream_name
+ def finish_current_stream(self):
+ self.finish_current_file()
+ self.flush_data()
+ if len(self._current_stream_files) == 0:
+ pass
+        elif self._current_stream_name is None:
+ raise errors.AssertionError(
+ "Cannot finish an unnamed stream (%d bytes in %d files)" %
+ (self._current_stream_length, len(self._current_stream_files)))
+ else:
+ if len(self._current_stream_locators) == 0:
+ self._current_stream_locators += [EMPTY_BLOCK_LOCATOR]
+ self._finished_streams += [[self._current_stream_name,
+ self._current_stream_locators,
+ self._current_stream_files]]
+ self._current_stream_files = []
+ self._current_stream_length = 0
+ self._current_stream_locators = []
+ self._current_stream_name = None
+ self._current_file_pos = 0
+ self._current_file_name = None
+ def finish(self):
+ return Keep.put(self.manifest_text())
+ def manifest_text(self):
+ self.finish_current_stream()
+ manifest = ''
+ for stream in self._finished_streams:
+ if not re.search(r'^\.(/.*)?$', stream[0]):
+ manifest += './'
+ manifest += stream[0].replace(' ', '\\040')
+ for locator in stream[1]:
+ manifest += " %s" % locator
+ for sfile in stream[2]:
+ manifest += " %d:%d:%s" % (sfile[0], sfile[1], sfile[2].replace(' ', '\\040'))
+ manifest += "\n"
+ return manifest
+ def data_locators(self):
+ ret = []
+ for name, locators, files in self._finished_streams:
+ ret += locators
+ return ret
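+
+# Minimal write sketch (hedged; assumes a reachable Keep or
+# KEEP_LOCAL_STORE).  write_directory_tree('/some/dir') is the
+# directory-level equivalent.
+#
+#   writer = CollectionWriter()
+#   writer.start_new_file('hello.txt')
+#   writer.write('hello world\n')
+#   manifest_locator = writer.finish()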
--- /dev/null
+import hashlib
+import httplib
+import httplib2
+import logging
+import os
+import re
+import threading
+import time
+
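+# 'errors', 'config' and 'api' are referenced below; they are assumed
+# to be resolved through the parent arvados package (importing arvados
+# here would be circular).
+
+# Locator of the empty data block: md5 of the empty string plus a zero
+# size hint (value assumed from the locator convention used below).
+EMPTY_BLOCK_LOCATOR = 'd41d8cd98f00b204e9800998ecf8427e+0'
+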
+global_client_object = None
+
+class Keep:
+ @staticmethod
+ def global_client_object():
+ global global_client_object
+        if global_client_object is None:
+ global_client_object = KeepClient()
+ return global_client_object
+
+ @staticmethod
+ def get(locator, **kwargs):
+ return Keep.global_client_object().get(locator, **kwargs)
+
+ @staticmethod
+ def put(data, **kwargs):
+ return Keep.global_client_object().put(data, **kwargs)
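+
+    # Hedged usage sketch:
+    #   locator = Keep.put('foo')   # -> 'acbd18db4cc2f85cedef654fccc4a4d8+3'
+    #   data = Keep.get(locator)    # -> 'foo'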
+
+class KeepClient(object):
+
+ class ThreadLimiter(object):
+ """
+ Limit the number of threads running at a given time to
+ {desired successes} minus {successes reported}. When successes
+ reported == desired, wake up the remaining threads and tell
+ them to quit.
+
+ Should be used in a "with" block.
+ """
+ def __init__(self, todo):
+ self._todo = todo
+ self._done = 0
+ self._todo_lock = threading.Semaphore(todo)
+ self._done_lock = threading.Lock()
+ def __enter__(self):
+ self._todo_lock.acquire()
+ return self
+        def __exit__(self, exc_type, exc_value, traceback):
+ self._todo_lock.release()
+ def shall_i_proceed(self):
+ """
+ Return true if the current thread should do stuff. Return
+ false if the current thread should just stop.
+ """
+ with self._done_lock:
+ return (self._done < self._todo)
+ def increment_done(self):
+ """
+ Report that the current thread was successful.
+ """
+ with self._done_lock:
+ self._done += 1
+ def done(self):
+ """
+ Return how many successes were reported.
+ """
+ with self._done_lock:
+ return self._done
+
+ class KeepWriterThread(threading.Thread):
+ """
+ Write a blob of data to the given Keep server. Call
+ increment_done() of the given ThreadLimiter if the write
+ succeeds.
+ """
+ def __init__(self, **kwargs):
+ super(KeepClient.KeepWriterThread, self).__init__()
+ self.args = kwargs
+ def run(self):
+ global config
+ with self.args['thread_limiter'] as limiter:
+ if not limiter.shall_i_proceed():
+ # My turn arrived, but the job has been done without
+ # me.
+ return
+ logging.debug("KeepWriterThread %s proceeding %s %s" %
+ (str(threading.current_thread()),
+ self.args['data_hash'],
+ self.args['service_root']))
+ h = httplib2.Http()
+ url = self.args['service_root'] + self.args['data_hash']
+ api_token = config['ARVADOS_API_TOKEN']
+ headers = {'Authorization': "OAuth2 %s" % api_token}
+ try:
+ resp, content = h.request(url.encode('utf-8'), 'PUT',
+ headers=headers,
+ body=self.args['data'])
+ if (resp['status'] == '401' and
+ re.match(r'Timestamp verification failed', content)):
+ body = KeepClient.sign_for_old_server(
+ self.args['data_hash'],
+ self.args['data'])
+ h = httplib2.Http()
+ resp, content = h.request(url.encode('utf-8'), 'PUT',
+ headers=headers,
+ body=body)
+ if re.match(r'^2\d\d$', resp['status']):
+ logging.debug("KeepWriterThread %s succeeded %s %s" %
+ (str(threading.current_thread()),
+ self.args['data_hash'],
+ self.args['service_root']))
+ return limiter.increment_done()
+ logging.warning("Request fail: PUT %s => %s %s" %
+ (url, resp['status'], content))
+ except (httplib2.HttpLib2Error, httplib.HTTPException) as e:
+ logging.warning("Request fail: PUT %s => %s: %s" %
+ (url, type(e), str(e)))
+
+ def __init__(self):
+ self.lock = threading.Lock()
+ self.service_roots = None
+
+    def shuffled_service_roots(self, hash):
+        if self.service_roots is None:
+            self.lock.acquire()
+            try:
+                # Re-check under the lock so concurrent callers do not
+                # rebuild the list twice, and release the lock even if
+                # the API call raises.
+                if self.service_roots is None:
+                    keep_disks = api().keep_disks().list().execute()['items']
+                    roots = (("http%s://%s:%d/" %
+                              ('s' if f['service_ssl_flag'] else '',
+                               f['service_host'],
+                               f['service_port']))
+                             for f in keep_disks)
+                    self.service_roots = sorted(set(roots))
+                    logging.debug(str(self.service_roots))
+            finally:
+                self.lock.release()
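+        # Hedged note: the loop below derives a deterministic, per-hash
+        # permutation of the service roots by reading successive
+        # 8-hex-digit (32-bit) slices of the block hash as probe values
+        # into a shrinking pool, stretching the seed with more hash
+        # material when it runs out.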
+ seed = hash
+ pool = self.service_roots[:]
+ pseq = []
+ while len(pool) > 0:
+ if len(seed) < 8:
+ if len(pseq) < len(hash) / 4: # first time around
+ seed = hash[-4:] + hash
+ else:
+ seed += hash
+ probe = int(seed[0:8], 16) % len(pool)
+ pseq += [pool[probe]]
+ pool = pool[:probe] + pool[probe+1:]
+ seed = seed[8:]
+ logging.debug(str(pseq))
+ return pseq
+
+ def get(self, locator):
+ global config
+ if re.search(r',', locator):
+ return ''.join(self.get(x) for x in locator.split(','))
+ if 'KEEP_LOCAL_STORE' in os.environ:
+ return KeepClient.local_store_get(locator)
+ expect_hash = re.sub(r'\+.*', '', locator)
+ for service_root in self.shuffled_service_roots(expect_hash):
+ h = httplib2.Http()
+ url = service_root + expect_hash
+ api_token = config['ARVADOS_API_TOKEN']
+ headers = {'Authorization': "OAuth2 %s" % api_token,
+ 'Accept': 'application/octet-stream'}
+ try:
+ resp, content = h.request(url.encode('utf-8'), 'GET',
+ headers=headers)
+ if re.match(r'^2\d\d$', resp['status']):
+ m = hashlib.new('md5')
+ m.update(content)
+ md5 = m.hexdigest()
+ if md5 == expect_hash:
+ return content
+ logging.warning("Checksum fail: md5(%s) = %s" % (url, md5))
+ except (httplib2.HttpLib2Error, httplib.ResponseNotReady) as e:
+ logging.info("Request fail: GET %s => %s: %s" %
+ (url, type(e), str(e)))
+ raise errors.NotFoundError("Block not found: %s" % expect_hash)
+
+ def put(self, data, **kwargs):
+ if 'KEEP_LOCAL_STORE' in os.environ:
+ return KeepClient.local_store_put(data)
+ m = hashlib.new('md5')
+ m.update(data)
+ data_hash = m.hexdigest()
+ have_copies = 0
+ want_copies = kwargs.get('copies', 2)
+        if want_copies <= 0:
+ return data_hash
+ threads = []
+ thread_limiter = KeepClient.ThreadLimiter(want_copies)
+ for service_root in self.shuffled_service_roots(data_hash):
+ t = KeepClient.KeepWriterThread(data=data,
+ data_hash=data_hash,
+ service_root=service_root,
+ thread_limiter=thread_limiter)
+ t.start()
+ threads += [t]
+ for t in threads:
+ t.join()
+ have_copies = thread_limiter.done()
+ if have_copies == want_copies:
+ return (data_hash + '+' + str(len(data)))
+ raise errors.KeepWriteError(
+ "Write fail for %s: wanted %d but wrote %d" %
+ (data_hash, want_copies, have_copies))
+
+ @staticmethod
+ def sign_for_old_server(data_hash, data):
+ return (("-----BEGIN PGP SIGNED MESSAGE-----\n\n\n%d %s\n-----BEGIN PGP SIGNATURE-----\n\n-----END PGP SIGNATURE-----\n" % (int(time.time()), data_hash)) + data)
+
+
+ @staticmethod
+ def local_store_put(data):
+ m = hashlib.new('md5')
+ m.update(data)
+ md5 = m.hexdigest()
+ locator = '%s+%d' % (md5, len(data))
+        with open(os.path.join(os.environ['KEEP_LOCAL_STORE'], md5 + '.tmp'), 'wb') as f:
+ f.write(data)
+ os.rename(os.path.join(os.environ['KEEP_LOCAL_STORE'], md5 + '.tmp'),
+ os.path.join(os.environ['KEEP_LOCAL_STORE'], md5))
+ return locator
+ @staticmethod
+ def local_store_get(locator):
+ r = re.search('^([0-9a-f]{32,})', locator)
+ if not r:
+ raise errors.NotFoundError(
+ "Invalid data locator: '%s'" % locator)
+ if r.group(0) == EMPTY_BLOCK_LOCATOR.split('+')[0]:
+ return ''
+        with open(os.path.join(os.environ['KEEP_LOCAL_STORE'], r.group(0)), 'rb') as f:
+ return f.read()
--- /dev/null
+import bz2
+import re
+import zlib
+
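+# Hedged sibling-module import (assumed package layout; see the note in
+# Collection.py).  'errors' is also referenced below and is assumed to
+# be resolved through the parent arvados package.
+from Keep import Keep, EMPTY_BLOCK_LOCATOR
+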
+class StreamFileReader(object):
+ def __init__(self, stream, pos, size, name):
+ self._stream = stream
+ self._pos = pos
+ self._size = size
+ self._name = name
+ self._filepos = 0
+ def name(self):
+ return self._name
+ def decompressed_name(self):
+        return re.sub(r'\.(bz2|gz)$', '', self._name)
+ def size(self):
+ return self._size
+ def stream_name(self):
+ return self._stream.name()
+ def read(self, size, **kwargs):
+ self._stream.seek(self._pos + self._filepos)
+ data = self._stream.read(min(size, self._size - self._filepos))
+ self._filepos += len(data)
+ return data
+ def readall(self, size=2**20, **kwargs):
+ while True:
+ data = self.read(size, **kwargs)
+ if data == '':
+ break
+ yield data
+ def bunzip2(self, size):
+ decompressor = bz2.BZ2Decompressor()
+ for chunk in self.readall(size):
+ data = decompressor.decompress(chunk)
+            if data:
+ yield data
+ def gunzip(self, size):
+ decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
+ for chunk in self.readall(size):
+ data = decompressor.decompress(decompressor.unconsumed_tail + chunk)
+            if data:
+ yield data
+ def readall_decompressed(self, size=2**20):
+ self._stream.seek(self._pos + self._filepos)
+        if re.search(r'\.bz2$', self._name):
+            return self.bunzip2(size)
+        elif re.search(r'\.gz$', self._name):
+ return self.gunzip(size)
+ else:
+ return self.readall(size)
+ def readlines(self, decompress=True):
+ if decompress:
+ datasource = self.readall_decompressed()
+ else:
+ self._stream.seek(self._pos + self._filepos)
+ datasource = self.readall()
+ data = ''
+ for newdata in datasource:
+ data += newdata
+ sol = 0
+ while True:
+                eol = data.find("\n", sol)
+ if eol < 0:
+ break
+ yield data[sol:eol+1]
+ sol = eol+1
+ data = data[sol:]
+ if data != '':
+ yield data
+ def as_manifest(self):
+ if self.size() == 0:
+ return ("%s %s 0:0:%s\n"
+ % (self._stream.name(), EMPTY_BLOCK_LOCATOR, self.name()))
+        return " ".join(
+            self._stream.tokens_for_range(self._pos, self._size)) + "\n"
+
+class StreamReader(object):
+ def __init__(self, tokens):
+ self._tokens = tokens
+ self._current_datablock_data = None
+ self._current_datablock_pos = 0
+ self._current_datablock_index = -1
+ self._pos = 0
+
+ self._stream_name = None
+ self.data_locators = []
+ self.files = []
+
+ for tok in self._tokens:
+            if self._stream_name is None:
+ self._stream_name = tok.replace('\\040', ' ')
+ elif re.search(r'^[0-9a-f]{32}(\+\S+)*$', tok):
+ self.data_locators += [tok]
+ elif re.search(r'^\d+:\d+:\S+', tok):
+ pos, size, name = tok.split(':',2)
+ self.files += [[int(pos), int(size), name.replace('\\040', ' ')]]
+ else:
+ raise errors.SyntaxError("Invalid manifest format")
+
+ def tokens(self):
+ return self._tokens
+ def tokens_for_range(self, range_start, range_size):
+ resp = [self._stream_name]
+ return_all_tokens = False
+ block_start = 0
+ token_bytes_skipped = 0
+ for locator in self.data_locators:
+            sizehint = re.search(r'\+(\d+)', locator)
+            if not sizehint:
+                return_all_tokens = True
+            if return_all_tokens:
+                resp += [locator]
+                continue
+            blocksize = int(sizehint.group(1))
+ if range_start + range_size <= block_start:
+ break
+ if range_start < block_start + blocksize:
+ resp += [locator]
+ else:
+ token_bytes_skipped += blocksize
+ block_start += blocksize
+ for f in self.files:
+            if ((f[0] < range_start + range_size) and
+                (f[0] + f[1] > range_start) and
+                f[1] > 0):
+ resp += ["%d:%d:%s" % (f[0] - token_bytes_skipped, f[1], f[2])]
+ return resp
+ def name(self):
+ return self._stream_name
+ def all_files(self):
+ for f in self.files:
+ pos, size, name = f
+ yield StreamFileReader(self, pos, size, name)
+ def nextdatablock(self):
+ if self._current_datablock_index < 0:
+ self._current_datablock_pos = 0
+ self._current_datablock_index = 0
+ else:
+ self._current_datablock_pos += self.current_datablock_size()
+ self._current_datablock_index += 1
+ self._current_datablock_data = None
+ def current_datablock_data(self):
+        if self._current_datablock_data is None:
+ self._current_datablock_data = Keep.get(self.data_locators[self._current_datablock_index])
+ return self._current_datablock_data
+ def current_datablock_size(self):
+ if self._current_datablock_index < 0:
+ self.nextdatablock()
+        sizehint = re.search(r'\+(\d+)', self.data_locators[self._current_datablock_index])
+        if sizehint:
+            return int(sizehint.group(1))
+ return len(self.current_datablock_data())
+ def seek(self, pos):
+ """Set the position of the next read operation."""
+ self._pos = pos
+ def really_seek(self):
+ """Find and load the appropriate data block, so the byte at
+ _pos is in memory.
+ """
+ if self._pos == self._current_datablock_pos:
+ return True
+        if (self._current_datablock_pos is not None and
+ self._pos >= self._current_datablock_pos and
+ self._pos <= self._current_datablock_pos + self.current_datablock_size()):
+ return True
+ if self._pos < self._current_datablock_pos:
+ self._current_datablock_index = -1
+ self.nextdatablock()
+ while (self._pos > self._current_datablock_pos and
+ self._pos > self._current_datablock_pos + self.current_datablock_size()):
+ self.nextdatablock()
+ def read(self, size):
+ """Read no more than size bytes -- but at least one byte,
+ unless _pos is already at the end of the stream.
+ """
+ if size == 0:
+ return ''
+ self.really_seek()
+ while self._pos >= self._current_datablock_pos + self.current_datablock_size():
+ self.nextdatablock()
+ if self._current_datablock_index >= len(self.data_locators):
+ return None
+        offset = self._pos - self._current_datablock_pos
+        data = self.current_datablock_data()[offset : offset + size]
+ self._pos += len(data)
+ return data
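+
+# Minimal read sketch (hedged): iterate the files described by one
+# manifest stream line.
+#
+#   for f in StreamReader(stream_line.split()).all_files():
+#       for line in f.readlines():
+#           ...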
import apiclient
import apiclient.discovery
+from Stream import *
+from Collection import *
+from Keep import *
+
+
# Arvados configuration settings are taken from $HOME/.config/arvados.
# Environment variables override settings in the config file.
#
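+# For example, ARVADOS_API_TOKEN set in the environment takes
+# precedence over an ARVADOS_API_TOKEN entry in $HOME/.config/arvados.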
allfiles += [ent_base]
return allfiles
-class StreamFileReader(object):
- def __init__(self, stream, pos, size, name):
- self._stream = stream
- self._pos = pos
- self._size = size
- self._name = name
- self._filepos = 0
- def name(self):
- return self._name
- def decompressed_name(self):
- return re.sub('\.(bz2|gz)$', '', self._name)
- def size(self):
- return self._size
- def stream_name(self):
- return self._stream.name()
- def read(self, size, **kwargs):
- self._stream.seek(self._pos + self._filepos)
- data = self._stream.read(min(size, self._size - self._filepos))
- self._filepos += len(data)
- return data
- def readall(self, size=2**20, **kwargs):
- while True:
- data = self.read(size, **kwargs)
- if data == '':
- break
- yield data
- def bunzip2(self, size):
- decompressor = bz2.BZ2Decompressor()
- for chunk in self.readall(size):
- data = decompressor.decompress(chunk)
- if data and data != '':
- yield data
- def gunzip(self, size):
- decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
- for chunk in self.readall(size):
- data = decompressor.decompress(decompressor.unconsumed_tail + chunk)
- if data and data != '':
- yield data
- def readall_decompressed(self, size=2**20):
- self._stream.seek(self._pos + self._filepos)
- if re.search('\.bz2$', self._name):
- return self.bunzip2(size)
- elif re.search('\.gz$', self._name):
- return self.gunzip(size)
- else:
- return self.readall(size)
- def readlines(self, decompress=True):
- if decompress:
- datasource = self.readall_decompressed()
- else:
- self._stream.seek(self._pos + self._filepos)
- datasource = self.readall()
- data = ''
- for newdata in datasource:
- data += newdata
- sol = 0
- while True:
- eol = string.find(data, "\n", sol)
- if eol < 0:
- break
- yield data[sol:eol+1]
- sol = eol+1
- data = data[sol:]
- if data != '':
- yield data
- def as_manifest(self):
- if self.size() == 0:
- return ("%s %s 0:0:%s\n"
- % (self._stream.name(), EMPTY_BLOCK_LOCATOR, self.name()))
- return string.join(self._stream.tokens_for_range(self._pos, self._size),
- " ") + "\n"
-
-class StreamReader(object):
- def __init__(self, tokens):
- self._tokens = tokens
- self._current_datablock_data = None
- self._current_datablock_pos = 0
- self._current_datablock_index = -1
- self._pos = 0
-
- self._stream_name = None
- self.data_locators = []
- self.files = []
-
- for tok in self._tokens:
- if self._stream_name == None:
- self._stream_name = tok.replace('\\040', ' ')
- elif re.search(r'^[0-9a-f]{32}(\+\S+)*$', tok):
- self.data_locators += [tok]
- elif re.search(r'^\d+:\d+:\S+', tok):
- pos, size, name = tok.split(':',2)
- self.files += [[int(pos), int(size), name.replace('\\040', ' ')]]
- else:
- raise errors.SyntaxError("Invalid manifest format")
-
- def tokens(self):
- return self._tokens
- def tokens_for_range(self, range_start, range_size):
- resp = [self._stream_name]
- return_all_tokens = False
- block_start = 0
- token_bytes_skipped = 0
- for locator in self.data_locators:
- sizehint = re.search(r'\+(\d+)', locator)
- if not sizehint:
- return_all_tokens = True
- if return_all_tokens:
- resp += [locator]
- next
- blocksize = int(sizehint.group(0))
- if range_start + range_size <= block_start:
- break
- if range_start < block_start + blocksize:
- resp += [locator]
- else:
- token_bytes_skipped += blocksize
- block_start += blocksize
- for f in self.files:
- if ((f[0] < range_start + range_size)
- and
- (f[0] + f[1] > range_start)
- and
- f[1] > 0):
- resp += ["%d:%d:%s" % (f[0] - token_bytes_skipped, f[1], f[2])]
- return resp
- def name(self):
- return self._stream_name
- def all_files(self):
- for f in self.files:
- pos, size, name = f
- yield StreamFileReader(self, pos, size, name)
- def nextdatablock(self):
- if self._current_datablock_index < 0:
- self._current_datablock_pos = 0
- self._current_datablock_index = 0
- else:
- self._current_datablock_pos += self.current_datablock_size()
- self._current_datablock_index += 1
- self._current_datablock_data = None
- def current_datablock_data(self):
- if self._current_datablock_data == None:
- self._current_datablock_data = Keep.get(self.data_locators[self._current_datablock_index])
- return self._current_datablock_data
- def current_datablock_size(self):
- if self._current_datablock_index < 0:
- self.nextdatablock()
- sizehint = re.search('\+(\d+)', self.data_locators[self._current_datablock_index])
- if sizehint:
- return int(sizehint.group(0))
- return len(self.current_datablock_data())
- def seek(self, pos):
- """Set the position of the next read operation."""
- self._pos = pos
- def really_seek(self):
- """Find and load the appropriate data block, so the byte at
- _pos is in memory.
- """
- if self._pos == self._current_datablock_pos:
- return True
- if (self._current_datablock_pos != None and
- self._pos >= self._current_datablock_pos and
- self._pos <= self._current_datablock_pos + self.current_datablock_size()):
- return True
- if self._pos < self._current_datablock_pos:
- self._current_datablock_index = -1
- self.nextdatablock()
- while (self._pos > self._current_datablock_pos and
- self._pos > self._current_datablock_pos + self.current_datablock_size()):
- self.nextdatablock()
- def read(self, size):
- """Read no more than size bytes -- but at least one byte,
- unless _pos is already at the end of the stream.
- """
- if size == 0:
- return ''
- self.really_seek()
- while self._pos >= self._current_datablock_pos + self.current_datablock_size():
- self.nextdatablock()
- if self._current_datablock_index >= len(self.data_locators):
- return None
- data = self.current_datablock_data()[self._pos - self._current_datablock_pos : self._pos - self._current_datablock_pos + size]
- self._pos += len(data)
- return data
-
-class CollectionReader(object):
- def __init__(self, manifest_locator_or_text):
- if re.search(r'^\S+( [a-f0-9]{32,}(\+\S+)*)+( \d+:\d+:\S+)+\n', manifest_locator_or_text):
- self._manifest_text = manifest_locator_or_text
- self._manifest_locator = None
- else:
- self._manifest_locator = manifest_locator_or_text
- self._manifest_text = None
- self._streams = None
- def __enter__(self):
- pass
- def __exit__(self):
- pass
- def _populate(self):
- if self._streams != None:
- return
- if not self._manifest_text:
- self._manifest_text = Keep.get(self._manifest_locator)
- self._streams = []
- for stream_line in self._manifest_text.split("\n"):
- if stream_line != '':
- stream_tokens = stream_line.split()
- self._streams += [stream_tokens]
- def all_streams(self):
- self._populate()
- resp = []
- for s in self._streams:
- resp += [StreamReader(s)]
- return resp
- def all_files(self):
- for s in self.all_streams():
- for f in s.all_files():
- yield f
- def manifest_text(self):
- self._populate()
- return self._manifest_text
-
-class CollectionWriter(object):
- KEEP_BLOCK_SIZE = 2**26
- def __init__(self):
- self._data_buffer = []
- self._data_buffer_len = 0
- self._current_stream_files = []
- self._current_stream_length = 0
- self._current_stream_locators = []
- self._current_stream_name = '.'
- self._current_file_name = None
- self._current_file_pos = 0
- self._finished_streams = []
- def __enter__(self):
- pass
- def __exit__(self):
- self.finish()
- def write_directory_tree(self,
- path, stream_name='.', max_manifest_depth=-1):
- self.start_new_stream(stream_name)
- todo = []
- if max_manifest_depth == 0:
- dirents = sorted(util.listdir_recursive(path))
- else:
- dirents = sorted(os.listdir(path))
- for dirent in dirents:
- target = os.path.join(path, dirent)
- if os.path.isdir(target):
- todo += [[target,
- os.path.join(stream_name, dirent),
- max_manifest_depth-1]]
- else:
- self.start_new_file(dirent)
- with open(target, 'rb') as f:
- while True:
- buf = f.read(2**26)
- if len(buf) == 0:
- break
- self.write(buf)
- self.finish_current_stream()
- map(lambda x: self.write_directory_tree(*x), todo)
-
- def write(self, newdata):
- if hasattr(newdata, '__iter__'):
- for s in newdata:
- self.write(s)
- return
- self._data_buffer += [newdata]
- self._data_buffer_len += len(newdata)
- self._current_stream_length += len(newdata)
- while self._data_buffer_len >= self.KEEP_BLOCK_SIZE:
- self.flush_data()
- def flush_data(self):
- data_buffer = ''.join(self._data_buffer)
- if data_buffer != '':
- self._current_stream_locators += [Keep.put(data_buffer[0:self.KEEP_BLOCK_SIZE])]
- self._data_buffer = [data_buffer[self.KEEP_BLOCK_SIZE:]]
- self._data_buffer_len = len(self._data_buffer[0])
- def start_new_file(self, newfilename=None):
- self.finish_current_file()
- self.set_current_file_name(newfilename)
- def set_current_file_name(self, newfilename):
- if re.search(r'[\t\n]', newfilename):
- raise errors.AssertionError(
- "Manifest filenames cannot contain whitespace: %s" %
- newfilename)
- self._current_file_name = newfilename
- def current_file_name(self):
- return self._current_file_name
- def finish_current_file(self):
- if self._current_file_name == None:
- if self._current_file_pos == self._current_stream_length:
- return
- raise errors.AssertionError(
- "Cannot finish an unnamed file " +
- "(%d bytes at offset %d in '%s' stream)" %
- (self._current_stream_length - self._current_file_pos,
- self._current_file_pos,
- self._current_stream_name))
- self._current_stream_files += [[self._current_file_pos,
- self._current_stream_length - self._current_file_pos,
- self._current_file_name]]
- self._current_file_pos = self._current_stream_length
- def start_new_stream(self, newstreamname='.'):
- self.finish_current_stream()
- self.set_current_stream_name(newstreamname)
- def set_current_stream_name(self, newstreamname):
- if re.search(r'[\t\n]', newstreamname):
- raise errors.AssertionError(
- "Manifest stream names cannot contain whitespace")
- self._current_stream_name = '.' if newstreamname=='' else newstreamname
- def current_stream_name(self):
- return self._current_stream_name
- def finish_current_stream(self):
- self.finish_current_file()
- self.flush_data()
- if len(self._current_stream_files) == 0:
- pass
- elif self._current_stream_name == None:
- raise errors.AssertionError(
- "Cannot finish an unnamed stream (%d bytes in %d files)" %
- (self._current_stream_length, len(self._current_stream_files)))
- else:
- if len(self._current_stream_locators) == 0:
- self._current_stream_locators += [EMPTY_BLOCK_LOCATOR]
- self._finished_streams += [[self._current_stream_name,
- self._current_stream_locators,
- self._current_stream_files]]
- self._current_stream_files = []
- self._current_stream_length = 0
- self._current_stream_locators = []
- self._current_stream_name = None
- self._current_file_pos = 0
- self._current_file_name = None
- def finish(self):
- return Keep.put(self.manifest_text())
- def manifest_text(self):
- self.finish_current_stream()
- manifest = ''
- for stream in self._finished_streams:
- if not re.search(r'^\.(/.*)?$', stream[0]):
- manifest += './'
- manifest += stream[0].replace(' ', '\\040')
- for locator in stream[1]:
- manifest += " %s" % locator
- for sfile in stream[2]:
- manifest += " %d:%d:%s" % (sfile[0], sfile[1], sfile[2].replace(' ', '\\040'))
- manifest += "\n"
- return manifest
- def data_locators(self):
- ret = []
- for name, locators, files in self._finished_streams:
- ret += locators
- return ret
-
-global_client_object = None
-
-class Keep:
- @staticmethod
- def global_client_object():
- global global_client_object
- if global_client_object == None:
- global_client_object = KeepClient()
- return global_client_object
-
- @staticmethod
- def get(locator, **kwargs):
- return Keep.global_client_object().get(locator, **kwargs)
- @staticmethod
- def put(data, **kwargs):
- return Keep.global_client_object().put(data, **kwargs)
-
-class KeepClient(object):
-
- class ThreadLimiter(object):
- """
- Limit the number of threads running at a given time to
- {desired successes} minus {successes reported}. When successes
- reported == desired, wake up the remaining threads and tell
- them to quit.
-
- Should be used in a "with" block.
- """
- def __init__(self, todo):
- self._todo = todo
- self._done = 0
- self._todo_lock = threading.Semaphore(todo)
- self._done_lock = threading.Lock()
- def __enter__(self):
- self._todo_lock.acquire()
- return self
- def __exit__(self, type, value, traceback):
- self._todo_lock.release()
- def shall_i_proceed(self):
- """
- Return true if the current thread should do stuff. Return
- false if the current thread should just stop.
- """
- with self._done_lock:
- return (self._done < self._todo)
- def increment_done(self):
- """
- Report that the current thread was successful.
- """
- with self._done_lock:
- self._done += 1
- def done(self):
- """
- Return how many successes were reported.
- """
- with self._done_lock:
- return self._done
-
- class KeepWriterThread(threading.Thread):
- """
- Write a blob of data to the given Keep server. Call
- increment_done() of the given ThreadLimiter if the write
- succeeds.
- """
- def __init__(self, **kwargs):
- super(KeepClient.KeepWriterThread, self).__init__()
- self.args = kwargs
- def run(self):
- global config
- with self.args['thread_limiter'] as limiter:
- if not limiter.shall_i_proceed():
- # My turn arrived, but the job has been done without
- # me.
- return
- logging.debug("KeepWriterThread %s proceeding %s %s" %
- (str(threading.current_thread()),
- self.args['data_hash'],
- self.args['service_root']))
- h = httplib2.Http()
- url = self.args['service_root'] + self.args['data_hash']
- api_token = config['ARVADOS_API_TOKEN']
- headers = {'Authorization': "OAuth2 %s" % api_token}
- try:
- resp, content = h.request(url.encode('utf-8'), 'PUT',
- headers=headers,
- body=self.args['data'])
- if (resp['status'] == '401' and
- re.match(r'Timestamp verification failed', content)):
- body = KeepClient.sign_for_old_server(
- self.args['data_hash'],
- self.args['data'])
- h = httplib2.Http()
- resp, content = h.request(url.encode('utf-8'), 'PUT',
- headers=headers,
- body=body)
- if re.match(r'^2\d\d$', resp['status']):
- logging.debug("KeepWriterThread %s succeeded %s %s" %
- (str(threading.current_thread()),
- self.args['data_hash'],
- self.args['service_root']))
- return limiter.increment_done()
- logging.warning("Request fail: PUT %s => %s %s" %
- (url, resp['status'], content))
- except (httplib2.HttpLib2Error, httplib.HTTPException) as e:
- logging.warning("Request fail: PUT %s => %s: %s" %
- (url, type(e), str(e)))
-
- def __init__(self):
- self.lock = threading.Lock()
- self.service_roots = None
-
- def shuffled_service_roots(self, hash):
- if self.service_roots == None:
- self.lock.acquire()
- keep_disks = api().keep_disks().list().execute()['items']
- roots = (("http%s://%s:%d/" %
- ('s' if f['service_ssl_flag'] else '',
- f['service_host'],
- f['service_port']))
- for f in keep_disks)
- self.service_roots = sorted(set(roots))
- logging.debug(str(self.service_roots))
- self.lock.release()
- seed = hash
- pool = self.service_roots[:]
- pseq = []
- while len(pool) > 0:
- if len(seed) < 8:
- if len(pseq) < len(hash) / 4: # first time around
- seed = hash[-4:] + hash
- else:
- seed += hash
- probe = int(seed[0:8], 16) % len(pool)
- pseq += [pool[probe]]
- pool = pool[:probe] + pool[probe+1:]
- seed = seed[8:]
- logging.debug(str(pseq))
- return pseq
-
- def get(self, locator):
- global config
- if re.search(r',', locator):
- return ''.join(self.get(x) for x in locator.split(','))
- if 'KEEP_LOCAL_STORE' in os.environ:
- return KeepClient.local_store_get(locator)
- expect_hash = re.sub(r'\+.*', '', locator)
- for service_root in self.shuffled_service_roots(expect_hash):
- h = httplib2.Http()
- url = service_root + expect_hash
- api_token = config['ARVADOS_API_TOKEN']
- headers = {'Authorization': "OAuth2 %s" % api_token,
- 'Accept': 'application/octet-stream'}
- try:
- resp, content = h.request(url.encode('utf-8'), 'GET',
- headers=headers)
- if re.match(r'^2\d\d$', resp['status']):
- m = hashlib.new('md5')
- m.update(content)
- md5 = m.hexdigest()
- if md5 == expect_hash:
- return content
- logging.warning("Checksum fail: md5(%s) = %s" % (url, md5))
- except (httplib2.HttpLib2Error, httplib.ResponseNotReady) as e:
- logging.info("Request fail: GET %s => %s: %s" %
- (url, type(e), str(e)))
- raise errors.NotFoundError("Block not found: %s" % expect_hash)
-
- def put(self, data, **kwargs):
- if 'KEEP_LOCAL_STORE' in os.environ:
- return KeepClient.local_store_put(data)
- m = hashlib.new('md5')
- m.update(data)
- data_hash = m.hexdigest()
- have_copies = 0
- want_copies = kwargs.get('copies', 2)
- if not (want_copies > 0):
- return data_hash
- threads = []
- thread_limiter = KeepClient.ThreadLimiter(want_copies)
- for service_root in self.shuffled_service_roots(data_hash):
- t = KeepClient.KeepWriterThread(data=data,
- data_hash=data_hash,
- service_root=service_root,
- thread_limiter=thread_limiter)
- t.start()
- threads += [t]
- for t in threads:
- t.join()
- have_copies = thread_limiter.done()
- if have_copies == want_copies:
- return (data_hash + '+' + str(len(data)))
- raise errors.KeepWriteError(
- "Write fail for %s: wanted %d but wrote %d" %
- (data_hash, want_copies, have_copies))
-
- @staticmethod
- def sign_for_old_server(data_hash, data):
- return (("-----BEGIN PGP SIGNED MESSAGE-----\n\n\n%d %s\n-----BEGIN PGP SIGNATURE-----\n\n-----END PGP SIGNATURE-----\n" % (int(time.time()), data_hash)) + data)
-
-
- @staticmethod
- def local_store_put(data):
- m = hashlib.new('md5')
- m.update(data)
- md5 = m.hexdigest()
- locator = '%s+%d' % (md5, len(data))
- with open(os.path.join(os.environ['KEEP_LOCAL_STORE'], md5 + '.tmp'), 'w') as f:
- f.write(data)
- os.rename(os.path.join(os.environ['KEEP_LOCAL_STORE'], md5 + '.tmp'),
- os.path.join(os.environ['KEEP_LOCAL_STORE'], md5))
- return locator
- @staticmethod
- def local_store_get(locator):
- r = re.search('^([0-9a-f]{32,})', locator)
- if not r:
- raise errors.NotFoundError(
- "Invalid data locator: '%s'" % locator)
- if r.group(0) == EMPTY_BLOCK_LOCATOR.split('+')[0]:
- return ''
- with open(os.path.join(os.environ['KEEP_LOCAL_STORE'], r.group(0)), 'r') as f:
- return f.read()
# We really shouldn't do this but some clients still use
# arvados.service.* directly instead of arvados.api().*