import re
import socket
import ssl
+import sys
import threading
import timer
+import urlparse
import arvados
import arvados.config as config
global_client_object = None
+# Monkey-patch TCP constants when not available (macOS). Values sourced from:
+# http://www.opensource.apple.com/source/xnu/xnu-2422.115.4/bsd/netinet/tcp.h
+if sys.platform == 'darwin':
+ if not hasattr(socket, 'TCP_KEEPALIVE'):
+ socket.TCP_KEEPALIVE = 0x010
+ if not hasattr(socket, 'TCP_KEEPINTVL'):
+ socket.TCP_KEEPINTVL = 0x101
+ if not hasattr(socket, 'TCP_KEEPCNT'):
+ socket.TCP_KEEPCNT = 0x102
+
+
class KeepLocator(object):
EPOCH_DATETIME = datetime.datetime.utcfromtimestamp(0)
HINT_RE = re.compile(r'^[A-Z][A-Za-z0-9@_-]+$')
self._cache_lock = threading.Lock()
class CacheSlot(object):
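+ # __slots__ avoids a per-instance __dict__, trimming memory overhead when many blocks are cached.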
+ __slots__ = ("locator", "ready", "content")
+
def __init__(self, locator):
self.locator = locator
self.ready = threading.Event()
self._cache.insert(0, n)
return n, True
-class KeepClient(object):
-
- # Default Keep server connection timeout: 2 seconds
- # Default Keep server read timeout: 300 seconds
- # Default Keep proxy connection timeout: 20 seconds
- # Default Keep proxy read timeout: 300 seconds
- DEFAULT_TIMEOUT = (2, 300)
- DEFAULT_PROXY_TIMEOUT = (20, 300)
+class Counter(object):
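+ """Minimal thread-safe counter.
+
+ KeepClient instantiates several of these to aggregate transfer
+ statistics (bytes uploaded/downloaded, cache hits/misses, request
+ counts) across threads and KeepService instances.
+ """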
+ def __init__(self, v=0):
+ self._lk = threading.Lock()
+ self._val = v
- class ThreadLimiter(object):
- """Limit the number of threads writing to Keep at once.
+ def add(self, v):
+ with self._lk:
+ self._val += v
- This ensures that only a number of writer threads that could
- potentially achieve the desired replication level run at once.
- Once the desired replication level is achieved, queued threads
- are instructed not to run.
+ def get(self):
+ with self._lk:
+ return self._val
- Should be used in a "with" block.
- """
- def __init__(self, want_copies, max_service_replicas):
- self._started = 0
- self._want_copies = want_copies
- self._done = 0
- self._response = None
- self._start_lock = threading.Condition()
- if (not max_service_replicas) or (max_service_replicas >= want_copies):
- max_threads = 1
- else:
- max_threads = math.ceil(float(want_copies) / max_service_replicas)
- _logger.debug("Limiter max threads is %d", max_threads)
- self._todo_lock = threading.Semaphore(max_threads)
- self._done_lock = threading.Lock()
- self._local = threading.local()
-
- def __enter__(self):
- self._start_lock.acquire()
- if getattr(self._local, 'sequence', None) is not None:
- # If the calling thread has used set_sequence(N), then
- # we wait here until N other threads have started.
- while self._started < self._local.sequence:
- self._start_lock.wait()
- self._todo_lock.acquire()
- self._started += 1
- self._start_lock.notifyAll()
- self._start_lock.release()
- return self
-
- def __exit__(self, type, value, traceback):
- self._todo_lock.release()
-
- def set_sequence(self, sequence):
- self._local.sequence = sequence
-
- def shall_i_proceed(self):
- """
- Return true if the current thread should write to Keep.
- Return false otherwise.
- """
- with self._done_lock:
- return (self._done < self._want_copies)
-
- def save_response(self, response_body, replicas_stored):
- """
- Records a response body (a locator, possibly signed) returned by
- the Keep server, and the number of replicas it stored.
- """
- with self._done_lock:
- self._done += replicas_stored
- self._response = response_body
- def response(self):
- """Return the body from the response to a PUT request."""
- with self._done_lock:
- return self._response
+class KeepClient(object):
- def done(self):
- """Return the total number of replicas successfully stored."""
- with self._done_lock:
- return self._done
+ # Default Keep server connection timeout: 2 seconds
+ # Default Keep server read timeout: 256 seconds
+ # Default Keep server bandwidth minimum: 32768 bytes per second
+ # Default Keep proxy connection timeout: 20 seconds
+ # Default Keep proxy read timeout: 256 seconds
+ # Default Keep proxy bandwidth minimum: 32768 bytes per second
+ DEFAULT_TIMEOUT = (2, 256, 32768)
+ DEFAULT_PROXY_TIMEOUT = (20, 256, 32768)
class KeepService(object):
arvados.errors.HttpError,
)
- def __init__(self, root, user_agent_pool=Queue.LifoQueue(), **headers):
+ def __init__(self, root, user_agent_pool=Queue.LifoQueue(),
+ upload_counter=None,
+ download_counter=None, **headers):
self.root = root
self._user_agent_pool = user_agent_pool
self._result = {'error': None}
self.get_headers = {'Accept': 'application/octet-stream'}
self.get_headers.update(headers)
self.put_headers = headers
+ self.upload_counter = upload_counter
+ self.download_counter = download_counter
def usable(self):
"""Is it worth attempting a request?"""
def _get_user_agent(self):
try:
- return self._user_agent_pool.get(False)
+ return self._user_agent_pool.get(block=False)
except Queue.Empty:
return pycurl.Curl()
def _put_user_agent(self, ua):
try:
ua.reset()
- self._user_agent_pool.put(ua, False)
+ self._user_agent_pool.put(ua, block=False)
except:
ua.close()
"""Because pycurl doesn't have CURLOPT_TCP_KEEPALIVE"""
s = socket.socket(family, socktype, protocol)
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
- s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 75)
+ # macOS does not define TCP_KEEPIDLE; setting it would raise an "invalid protocol" error, so check for the attribute first.
+ if hasattr(socket, 'TCP_KEEPIDLE'):
+ s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 75)
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 75)
return s
- def get(self, locator, timeout=None):
+ def get(self, locator, method="GET", timeout=None):
# locator is a KeepLocator object.
url = self.root + str(locator)
- _logger.debug("Request: GET %s", url)
+ _logger.debug("Request: %s %s", method, url)
curl = self._get_user_agent()
+ ok = None
try:
with timer.Timer() as t:
self._headers = {}
'{}: {}'.format(k,v) for k,v in self.get_headers.iteritems()])
curl.setopt(pycurl.WRITEFUNCTION, response_body.write)
curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
+ if method == "HEAD":
+ curl.setopt(pycurl.NOBODY, True)
self._setcurltimeouts(curl, timeout)
+
try:
curl.perform()
except Exception as e:
'headers': self._headers,
'error': False,
}
+
ok = retry.check_http_response_success(self._result['status_code'])
if not ok:
self._result['error'] = arvados.errors.HttpError(
self._result = {
'error': e,
}
- ok = False
self._usable = ok != False
if self._result.get('status_code', None):
# The client worked well enough to get an HTTP status
_logger.debug("Request fail: GET %s => %s: %s",
url, type(self._result['error']), str(self._result['error']))
return None
- _logger.info("%s response: %s bytes in %s msec (%.3f MiB/sec)",
+ if method == "HEAD":
+ _logger.info("HEAD %s: %s bytes",
+ self._result['status_code'],
+ self._result['headers'].get('content-length'))
+ return True
+
+ _logger.info("GET %s: %s bytes in %s msec (%.3f MiB/sec)",
self._result['status_code'],
len(self._result['body']),
t.msecs,
(len(self._result['body'])/(1024.0*1024))/t.secs if t.secs > 0 else 0)
+
+ if self.download_counter:
+ self.download_counter.add(len(self._result['body']))
resp_md5 = hashlib.md5(self._result['body']).hexdigest()
if resp_md5 != locator.md5sum:
_logger.warning("Checksum fail: md5(%s) = %s",
url = self.root + hash_s
_logger.debug("Request: PUT %s", url)
curl = self._get_user_agent()
+ ok = None
try:
- self._headers = {}
- body_reader = cStringIO.StringIO(body)
- response_body = cStringIO.StringIO()
- curl.setopt(pycurl.NOSIGNAL, 1)
- curl.setopt(pycurl.OPENSOCKETFUNCTION, self._socket_open)
- curl.setopt(pycurl.URL, url.encode('utf-8'))
- # Using UPLOAD tells cURL to wait for a "go ahead" from the
- # Keep server (in the form of a HTTP/1.1 "100 Continue"
- # response) instead of sending the request body immediately.
- # This allows the server to reject the request if the request
- # is invalid or the server is read-only, without waiting for
- # the client to send the entire block.
- curl.setopt(pycurl.UPLOAD, True)
- curl.setopt(pycurl.INFILESIZE, len(body))
- curl.setopt(pycurl.READFUNCTION, body_reader.read)
- curl.setopt(pycurl.HTTPHEADER, [
- '{}: {}'.format(k,v) for k,v in self.put_headers.iteritems()])
- curl.setopt(pycurl.WRITEFUNCTION, response_body.write)
- curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
- self._setcurltimeouts(curl, timeout)
- try:
- curl.perform()
- except Exception as e:
- raise arvados.errors.HttpError(0, str(e))
- self._result = {
- 'status_code': curl.getinfo(pycurl.RESPONSE_CODE),
- 'body': response_body.getvalue(),
- 'headers': self._headers,
- 'error': False,
- }
+ with timer.Timer() as t:
+ self._headers = {}
+ body_reader = cStringIO.StringIO(body)
+ response_body = cStringIO.StringIO()
+ curl.setopt(pycurl.NOSIGNAL, 1)
+ curl.setopt(pycurl.OPENSOCKETFUNCTION, self._socket_open)
+ curl.setopt(pycurl.URL, url.encode('utf-8'))
+ # Using UPLOAD tells cURL to wait for a "go ahead" from the
+ # Keep server (in the form of a HTTP/1.1 "100 Continue"
+ # response) instead of sending the request body immediately.
+ # This allows the server to reject the request if the request
+ # is invalid or the server is read-only, without waiting for
+ # the client to send the entire block.
+ curl.setopt(pycurl.UPLOAD, True)
+ curl.setopt(pycurl.INFILESIZE, len(body))
+ curl.setopt(pycurl.READFUNCTION, body_reader.read)
+ curl.setopt(pycurl.HTTPHEADER, [
+ '{}: {}'.format(k,v) for k,v in self.put_headers.iteritems()])
+ curl.setopt(pycurl.WRITEFUNCTION, response_body.write)
+ curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
+ self._setcurltimeouts(curl, timeout)
+ try:
+ curl.perform()
+ except Exception as e:
+ raise arvados.errors.HttpError(0, str(e))
+ self._result = {
+ 'status_code': curl.getinfo(pycurl.RESPONSE_CODE),
+ 'body': response_body.getvalue(),
+ 'headers': self._headers,
+ 'error': False,
+ }
ok = retry.check_http_response_success(self._result['status_code'])
if not ok:
self._result['error'] = arvados.errors.HttpError(
self._result = {
'error': e,
}
- ok = False
self._usable = ok != False # still usable if ok is True or None
if self._result.get('status_code', None):
# Client is functional. See comment in get().
_logger.debug("Request fail: PUT %s => %s: %s",
url, type(self._result['error']), str(self._result['error']))
return False
+ _logger.info("PUT %s: %s bytes in %s msec (%.3f MiB/sec)",
+ self._result['status_code'],
+ len(body),
+ t.msecs,
+ (len(body)/(1024.0*1024))/t.secs if t.secs > 0 else 0)
+ if self.upload_counter:
+ self.upload_counter.add(len(body))
return True
def _setcurltimeouts(self, curl, timeouts):
if not timeouts:
return
elif isinstance(timeouts, tuple):
- conn_t, xfer_t = timeouts
+ if len(timeouts) == 2:
+ conn_t, xfer_t = timeouts
+ bandwidth_bps = KeepClient.DEFAULT_TIMEOUT[2]
+ else:
+ conn_t, xfer_t, bandwidth_bps = timeouts
else:
conn_t, xfer_t = (timeouts, timeouts)
+ bandwidth_bps = KeepClient.DEFAULT_TIMEOUT[2]
curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(conn_t*1000))
- curl.setopt(pycurl.TIMEOUT_MS, int(xfer_t*1000))
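+ # Swap the hard overall deadline (TIMEOUT_MS) for a stall detector: abort only when throughput stays below bandwidth_bps bytes/sec for xfer_t consecutive seconds.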
+ curl.setopt(pycurl.LOW_SPEED_TIME, int(math.ceil(xfer_t)))
+ curl.setopt(pycurl.LOW_SPEED_LIMIT, int(math.ceil(bandwidth_bps)))
def _headerfunction(self, header_line):
header_line = header_line.decode('iso-8859-1')
self._lastheadername = name
self._headers[name] = value
# Returning None implies all bytes were written
-
-
+
+
+ class KeepWriterQueue(Queue.Queue):
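+ """Task queue shared by the threads in a KeepWriterThreadPool.
+
+ Besides queueing (KeepService, service_root) write tasks, it tracks
+ how many replicas are still wanted and parks idle workers until a
+ retry slot opens or the replication goal is met.
+ """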
+ def __init__(self, copies):
+ Queue.Queue.__init__(self) # Old-style superclass
+ self.wanted_copies = copies
+ self.successful_copies = 0
+ self.response = None
+ self.successful_copies_lock = threading.Lock()
+ self.pending_tries = copies
+ self.pending_tries_notification = threading.Condition()
+
+ def write_success(self, response, replicas_nr):
+ with self.successful_copies_lock:
+ self.successful_copies += replicas_nr
+ self.response = response
+ with self.pending_tries_notification:
+ self.pending_tries_notification.notify_all()
+
+ def write_fail(self, ks):
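+ # A failed attempt returns a retry slot; wake one parked worker to try the next service.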
+ with self.pending_tries_notification:
+ self.pending_tries += 1
+ self.pending_tries_notification.notify()
+
+ def pending_copies(self):
+ with self.successful_copies_lock:
+ return self.wanted_copies - self.successful_copies
+
+ def get_next_task(self):
+ with self.pending_tries_notification:
+ while True:
+ if self.pending_copies() < 1:
+ # This notify_all() is unnecessary --
+ # write_success() already called notify_all()
+ # when pending<1 became true, so it's not
+ # possible for any other thread to be in
+ # wait() now -- but it's cheap insurance
+ # against deadlock so we do it anyway:
+ self.pending_tries_notification.notify_all()
+ # Drain the queue and then raise Queue.Empty
+ while True:
+ self.get_nowait()
+ self.task_done()
+ elif self.pending_tries > 0:
+ service, service_root = self.get_nowait()
+ if service.finished():
+ self.task_done()
+ continue
+ self.pending_tries -= 1
+ return service, service_root
+ elif self.empty():
+ self.pending_tries_notification.notify_all()
+ raise Queue.Empty
+ else:
+ self.pending_tries_notification.wait()
+
+
+ class KeepWriterThreadPool(object):
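+ """Write one block to as many services as needed to reach `copies`.
+
+ A single worker suffices when one service can store every wanted
+ copy (e.g. a proxy with max_service_replicas >= copies); otherwise
+ ceil(copies / max_service_replicas) workers run in parallel.
+ """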
+ def __init__(self, data, data_hash, copies, max_service_replicas, timeout=None):
+ self.total_task_nr = 0
+ self.wanted_copies = copies
+ if (not max_service_replicas) or (max_service_replicas >= copies):
+ num_threads = 1
+ else:
+ num_threads = int(math.ceil(float(copies) / max_service_replicas))
+ _logger.debug("Pool max threads is %d", num_threads)
+ self.workers = []
+ self.queue = KeepClient.KeepWriterQueue(copies)
+ # Create workers
+ for _ in range(num_threads):
+ w = KeepClient.KeepWriterThread(self.queue, data, data_hash, timeout)
+ self.workers.append(w)
+
+ def add_task(self, ks, service_root):
+ self.queue.put((ks, service_root))
+ self.total_task_nr += 1
+
+ def done(self):
+ return self.queue.successful_copies
+
+ def join(self):
+ # Start workers
+ for worker in self.workers:
+ worker.start()
+ # Wait for finished work
+ self.queue.join()
+
+ def response(self):
+ return self.queue.response
+
+
class KeepWriterThread(threading.Thread):
- """
- Write a blob of data to the given Keep server. On success, call
- save_response() of the given ThreadLimiter to save the returned
- locator.
- """
- def __init__(self, keep_service, **kwargs):
- super(KeepClient.KeepWriterThread, self).__init__()
- self.service = keep_service
- self.args = kwargs
- self._success = False
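+ # Sentinel exception instance: run() compares by identity ("e is not self.TaskFailed") to distinguish an ordinary failed PUT from an unexpected error worth logging.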
+ TaskFailed = RuntimeError()
- def success(self):
- return self._success
+ def __init__(self, queue, data, data_hash, timeout=None):
+ super(KeepClient.KeepWriterThread, self).__init__()
+ self.timeout = timeout
+ self.queue = queue
+ self.data = data
+ self.data_hash = data_hash
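+ # Daemon thread: a worker stuck on a slow request cannot block interpreter exit.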
+ self.daemon = True
def run(self):
- limiter = self.args['thread_limiter']
- sequence = self.args['thread_sequence']
- if sequence is not None:
- limiter.set_sequence(sequence)
- with limiter:
- if not limiter.shall_i_proceed():
- # My turn arrived, but the job has been done without
- # me.
+ while True:
+ try:
+ service, service_root = self.queue.get_next_task()
+ except Queue.Empty:
return
- self.run_with_limiter(limiter)
-
- def run_with_limiter(self, limiter):
- if self.service.finished():
- return
- _logger.debug("KeepWriterThread %s proceeding %s+%i %s",
- str(threading.current_thread()),
- self.args['data_hash'],
- len(self.args['data']),
- self.args['service_root'])
- self._success = bool(self.service.put(
- self.args['data_hash'],
- self.args['data'],
- timeout=self.args.get('timeout', None)))
- result = self.service.last_result()
- if self._success:
- _logger.debug("KeepWriterThread %s succeeded %s+%i %s",
- str(threading.current_thread()),
- self.args['data_hash'],
- len(self.args['data']),
- self.args['service_root'])
- # Tick the 'done' counter for the number of replica
- # reported stored by the server, for the case that
- # we're talking to a proxy or other backend that
- # stores to multiple copies for us.
try:
- replicas_stored = int(result['headers']['x-keep-replicas-stored'])
- except (KeyError, ValueError):
- replicas_stored = 1
- limiter.save_response(result['body'].strip(), replicas_stored)
- elif result.get('status_code', None):
- _logger.debug("Request fail: PUT %s => %s %s",
- self.args['data_hash'],
- result['status_code'],
- result['body'])
+ locator, copies = self.do_task(service, service_root)
+ except Exception as e:
+ if e is not self.TaskFailed:
+ _logger.exception("Exception in KeepWriterThread")
+ self.queue.write_fail(service)
+ else:
+ self.queue.write_success(locator, copies)
+ finally:
+ self.queue.task_done()
+
+ def do_task(self, service, service_root):
+ success = bool(service.put(self.data_hash,
+ self.data,
+ timeout=self.timeout))
+ result = service.last_result()
+
+ if not success:
+ if result.get('status_code', None):
+ _logger.debug("Request fail: PUT %s => %s %s",
+ self.data_hash,
+ result['status_code'],
+ result['body'])
+ raise self.TaskFailed
+
+ _logger.debug("KeepWriterThread %s succeeded %s+%i %s",
+ str(threading.current_thread()),
+ self.data_hash,
+ len(self.data),
+ service_root)
+ try:
+ replicas_stored = int(result['headers']['x-keep-replicas-stored'])
+ except (KeyError, ValueError):
+ replicas_stored = 1
+
+ return result['body'].strip(), replicas_stored
def __init__(self, api_client=None, proxy=None,
:proxy:
If specified, this KeepClient will send requests to this Keep
proxy. Otherwise, KeepClient will fall back to the setting of the
- ARVADOS_KEEP_PROXY configuration setting. If you want to ensure
- KeepClient does not use a proxy, pass in an empty string.
+ ARVADOS_KEEP_SERVICES or ARVADOS_KEEP_PROXY configuration settings.
+ If you do not want KeepClient to use a proxy, pass in an empty
+ string.
:timeout:
The initial timeout (in seconds) for HTTP requests to Keep
- non-proxy servers. A tuple of two floats is interpreted as
- (connection_timeout, read_timeout): see
- http://docs.python-requests.org/en/latest/user/advanced/#timeouts.
- Because timeouts are often a result of transient server load, the
- actual connection timeout will be increased by a factor of two on
- each retry.
- Default: (2, 300).
+ non-proxy servers. A tuple of three floats is interpreted as
+ (connection_timeout, read_timeout, minimum_bandwidth). A connection
+ will be aborted if the average traffic rate falls below
+ minimum_bandwidth bytes per second over an interval of read_timeout
+ seconds. Because timeouts are often a result of transient server
+ load, the actual connection timeout will be increased by a factor
+ of two on each retry.
+ Default: (2, 256, 32768).
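+ For example (illustrative values), timeout=(5, 120, 65536) allows 5
+ seconds to connect, then aborts any transfer whose rate stays below
+ 64 KiB/s for 120 consecutive seconds.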
:proxy_timeout:
The initial timeout (in seconds) for HTTP requests to
- Keep proxies. A tuple of two floats is interpreted as
- (connection_timeout, read_timeout). The behavior described
- above for adjusting connection timeouts on retry also applies.
- Default: (20, 300).
+ Keep proxies. A tuple of three floats is interpreted as
+ (connection_timeout, read_timeout, minimum_bandwidth). The behavior
+ described above for adjusting connection timeouts on retry also
+ applies.
+ Default: (20, 256, 32768).
:api_token:
If you're not using an API client, but only talking
"""
self.lock = threading.Lock()
if proxy is None:
- proxy = config.get('ARVADOS_KEEP_PROXY')
+ if config.get('ARVADOS_KEEP_SERVICES'):
+ proxy = config.get('ARVADOS_KEEP_SERVICES')
+ else:
+ proxy = config.get('ARVADOS_KEEP_PROXY')
if api_token is None:
if api_client is None:
api_token = config.get('ARVADOS_API_TOKEN')
self.timeout = timeout
self.proxy_timeout = proxy_timeout
self._user_agent_pool = Queue.LifoQueue()
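+ # Aggregate transfer/request statistics, shared with every KeepService built in map_new_services.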
+ self.upload_counter = Counter()
+ self.download_counter = Counter()
+ self.put_counter = Counter()
+ self.get_counter = Counter()
+ self.hits_counter = Counter()
+ self.misses_counter = Counter()
if local_store:
self.local_store = local_store
self.num_retries = num_retries
self.max_replicas_per_service = None
if proxy:
- if not proxy.endswith('/'):
- proxy += '/'
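+ # proxy may name several space-separated services, e.g. "https://keep1.example/ https://keep2.example/" (illustrative URLs).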
+ proxy_uris = proxy.split()
+ for i in range(len(proxy_uris)):
+ if not proxy_uris[i].endswith('/'):
+ proxy_uris[i] += '/'
+ # URL validation
+ url = urlparse.urlparse(proxy_uris[i])
+ if not (url.scheme and url.netloc):
+ raise arvados.errors.ArgumentError("Invalid proxy URI: {}".format(proxy_uris[i]))
self.api_token = api_token
self._gateway_services = {}
self._keep_services = [{
- 'uuid': 'proxy',
+ 'uuid': "00000-bi6l4-%015d" % idx,
'service_type': 'proxy',
- '_service_root': proxy,
- }]
+ '_service_root': uri,
+ } for idx, uri in enumerate(proxy_uris)]
self._writable_services = self._keep_services
self.using_proxy = True
self._static_services_list = True
# TODO(twp): the timeout should be a property of a
# KeepService, not a KeepClient. See #4488.
t = self.proxy_timeout if self.using_proxy else self.timeout
- return (t[0] * (1 << attempt_number), t[1])
-
+ if len(t) == 2:
+ return (t[0] * (1 << attempt_number), t[1])
+ else:
+ return (t[0] * (1 << attempt_number), t[1], t[2])
def _any_nondisk_services(self, service_list):
return any(ks.get('service_type', 'disk') != 'disk'
for ks in service_list)
for root in local_roots:
if root not in roots_map:
roots_map[root] = self.KeepService(
- root, self._user_agent_pool, **headers)
+ root, self._user_agent_pool,
+ upload_counter=self.upload_counter,
+ download_counter=self.download_counter,
+ **headers)
return local_roots
@staticmethod
else:
return None
+ @retry.retry_method
+ def head(self, loc_s, num_retries=None):
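+ """Probe for a block with a HEAD request: like get(), but returns True on success instead of the block contents."""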
+ return self._get_or_head(loc_s, method="HEAD", num_retries=num_retries)
+
@retry.retry_method
def get(self, loc_s, num_retries=None):
+ return self._get_or_head(loc_s, method="GET", num_retries=num_retries)
+
+ def _get_or_head(self, loc_s, method="GET", num_retries=None):
"""Get data from Keep.
This method fetches one or more blocks of data from Keep. It
"""
if ',' in loc_s:
return ''.join(self.get(x) for x in loc_s.split(','))
+
+ self.get_counter.add(1)
+
locator = KeepLocator(loc_s)
- slot, first = self.block_cache.reserve_cache(locator.md5sum)
- if not first:
- v = slot.get()
- return v
+ if method == "GET":
+ slot, first = self.block_cache.reserve_cache(locator.md5sum)
+ if not first:
+ self.hits_counter.add(1)
+ v = slot.get()
+ return v
+
+ self.misses_counter.add(1)
# If the locator has hints specifying a prefix (indicating a
# remote keepproxy) or the UUID of a local gateway service,
)])
# Map root URLs to their KeepService objects.
roots_map = {
- root: self.KeepService(root, self._user_agent_pool)
+ root: self.KeepService(root, self._user_agent_pool,
+ upload_counter=self.upload_counter,
+ download_counter=self.download_counter)
for root in hint_roots
}
for root in sorted_roots
if roots_map[root].usable()]
for keep_service in services_to_try:
- blob = keep_service.get(locator, timeout=self.current_timeout(num_retries-tries_left))
+ blob = keep_service.get(locator, method=method, timeout=self.current_timeout(num_retries-tries_left))
if blob is not None:
break
loop.save_result((blob, len(services_to_try)))
# Always cache the result, then return it if we succeeded.
- slot.set(blob)
- self.block_cache.cap_cache()
+ if method == "GET":
+ slot.set(blob)
+ self.block_cache.cap_cache()
if loop.success():
- return blob
+ if method == "HEAD":
+ return True
+ else:
+ return blob
# Q: Including 403 is necessary for the Keep tests to continue
# passing, but maybe they should expect KeepReadError instead?
elif not isinstance(data, str):
raise arvados.errors.ArgumentError("Argument 'data' to KeepClient.put is not type 'str'")
+ self.put_counter.add(1)
+
data_hash = hashlib.md5(data).hexdigest()
loc_s = data_hash + '+' + str(len(data))
if copies < 1:
headers = {}
# Tell the proxy how many copies we want it to store
- headers['X-Keep-Desired-Replication'] = str(copies)
+ headers['X-Keep-Desired-Replicas'] = str(copies)
roots_map = {}
loop = retry.RetryLoop(num_retries, self._check_loop_result,
backoff_start=2)
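+ # Replicas confirmed so far; carried across retries so each pass only requests the shortfall (copies - done).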
+ done = 0
for tries_left in loop:
try:
sorted_roots = self.map_new_services(
loop.save_result(error)
continue
- thread_limiter = KeepClient.ThreadLimiter(
- copies, self.max_replicas_per_service)
- threads = []
+ writer_pool = KeepClient.KeepWriterThreadPool(data=data,
+ data_hash=data_hash,
+ copies=copies - done,
+ max_service_replicas=self.max_replicas_per_service,
+ timeout=self.current_timeout(num_retries - tries_left))
for service_root, ks in [(root, roots_map[root])
for root in sorted_roots]:
if ks.finished():
continue
- t = KeepClient.KeepWriterThread(
- ks,
- data=data,
- data_hash=data_hash,
- service_root=service_root,
- thread_limiter=thread_limiter,
- timeout=self.current_timeout(num_retries-tries_left),
- thread_sequence=len(threads))
- t.start()
- threads.append(t)
- for t in threads:
- t.join()
- loop.save_result((thread_limiter.done() >= copies, len(threads)))
+ writer_pool.add_task(ks, service_root)
+ writer_pool.join()
+ done += writer_pool.done()
+ loop.save_result((done >= copies, writer_pool.total_task_nr))
if loop.success():
- return thread_limiter.response()
+ return writer_pool.response()
if not roots_map:
raise arvados.errors.KeepWriteError(
"failed to write {}: no Keep services available ({})".format(
if roots_map[key].last_result()['error'])
raise arvados.errors.KeepWriteError(
"failed to write {} (wanted {} copies but wrote {})".format(
- data_hash, copies, thread_limiter.done()), service_errors, label="service")
+ data_hash, copies, writer_pool.done()), service_errors, label="service")
def local_store_put(self, data, copies=1, num_retries=None):
"""A stub for put().