+import bz2
+import datetime
+import fcntl
+import functools
import gflags
+import hashlib
+import json
import logging
import os
import pprint
-import sys
-import types
-import subprocess
-import json
-import UserDict
+import pycurl
+import Queue
import re
-import hashlib
+import socket
+import ssl
import string
-import bz2
-import zlib
-import fcntl
-import time
+import cStringIO
+import subprocess
+import sys
import threading
+import time
import timer
-import datetime
-import ssl
-import socket
-import requests
+import types
+import UserDict
+import zlib
import arvados
import arvados.config as config
import arvados.retry as retry
import arvados.util
-try:
- # Workaround for urllib3 bug.
- # The 'requests' library enables urllib3's SNI support by default, which uses pyopenssl.
- # However, urllib3 prior to version 1.10 has a major bug in this feature
- # (OpenSSL WantWriteError, https://github.com/shazow/urllib3/issues/412)
- # Unfortunately Debian 8 is stabilizing on urllib3 1.9.1 which means the
- # following workaround is necessary to be able to use
- # the arvados python sdk with the distribution-provided packages.
- import urllib3
- from pkg_resources import parse_version
- if parse_version(urllib3.__version__) < parse_version('1.10'):
- from urllib3.contrib import pyopenssl
- pyopenssl.extract_from_urllib3()
-except ImportError:
- pass
-
_logger = logging.getLogger('arvados.keep')
global_client_object = None
+
class KeepLocator(object):
EPOCH_DATETIME = datetime.datetime.utcfromtimestamp(0)
HINT_RE = re.compile(r'^[A-Z][A-Za-z0-9@_-]+$')
class KeepService(object):
- # Make requests to a single Keep service, and track results.
- HTTP_ERRORS = (requests.exceptions.RequestException,
- socket.error, ssl.SSLError)
+ """Make requests to a single Keep service, and track results.
+
+ A KeepService is intended to last long enough to perform one
+ transaction (GET or PUT) against one Keep service. This can
+ involve calling either get() or put() multiple times in order
+ to retry after transient failures. However, calling both get()
+ and put() on a single instance -- or using the same instance
+ to access two different Keep services -- will not produce
+ sensible behavior.
+ """
+
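+ # Errors from these classes are caught by get() and put(),
+ # recorded in self._result['error'], and reported to the caller
+ # through the return value instead of being raised.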
+ HTTP_ERRORS = (
+ socket.error,
+ ssl.SSLError,
+ arvados.errors.HttpError,
+ )
- def __init__(self, root, session, **headers):
+ def __init__(self, root, user_agent_pool=Queue.LifoQueue(), **headers):
self.root = root
- self.last_result = None
- self.success_flag = None
- self.session = session
+ self._user_agent_pool = user_agent_pool
+ self._result = {'error': None}
+ self._usable = True
+ self._session = None
self.get_headers = {'Accept': 'application/octet-stream'}
self.get_headers.update(headers)
self.put_headers = headers
def usable(self):
- return self.success_flag is not False
+ """Is it worth attempting a request?"""
+ return self._usable
def finished(self):
- return self.success_flag is not None
+ """Did the request succeed or encounter permanent failure?"""
+ return self._result['error'] == False or not self._usable
+
+ def last_result(self):
+ return self._result
- def last_status(self):
+ def _get_user_agent(self):
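+ # Take a pycurl handle from the shared pool if one is available
+ # (reusing its persistent connections); otherwise create a new one.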
try:
- return self.last_result.status_code
- except AttributeError:
- return None
+ return self._user_agent_pool.get(False)
+ except Queue.Empty:
+ return pycurl.Curl()
+
+ def _put_user_agent(self, ua):
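+ # Reset the handle and return it to the pool for reuse. If the
+ # reset or the put fails, close the handle so a broken one is
+ # never handed out again.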
+ try:
+ ua.reset()
+ self._user_agent_pool.put(ua, False)
+ except Exception:
+ ua.close()
+
+ @staticmethod
+ def _socket_open(family, socktype, protocol, address=None):
+ """Because pycurl doesn't have CURLOPT_TCP_KEEPALIVE"""
+ s = socket.socket(family, socktype, protocol)
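+ # Ask the kernel to send TCP keepalive probes: start after 75
+ # seconds of idle time, then repeat every 75 seconds, so dead
+ # connections to Keep servers are detected instead of hanging.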
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+ s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 75)
+ s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 75)
+ return s
def get(self, locator, timeout=None):
# locator is a KeepLocator object.
url = self.root + str(locator)
_logger.debug("Request: GET %s", url)
+ curl = self._get_user_agent()
try:
with timer.Timer() as t:
- result = self.session.get(url.encode('utf-8'),
- headers=self.get_headers,
- timeout=timeout)
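+ # _headerfunction() accumulates response headers here; start
+ # each request with an empty dict.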
+ self._headers = {}
+ response_body = cStringIO.StringIO()
+ curl.setopt(pycurl.NOSIGNAL, 1)
+ curl.setopt(pycurl.OPENSOCKETFUNCTION, self._socket_open)
+ curl.setopt(pycurl.URL, url.encode('utf-8'))
+ curl.setopt(pycurl.HTTPHEADER, [
+ '{}: {}'.format(k,v) for k,v in self.get_headers.iteritems()])
+ curl.setopt(pycurl.WRITEFUNCTION, response_body.write)
+ curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
+ self._setcurltimeouts(curl, timeout)
+ try:
+ curl.perform()
+ except Exception as e:
+ raise arvados.errors.HttpError(0, str(e))
+ self._result = {
+ 'status_code': curl.getinfo(pycurl.RESPONSE_CODE),
+ 'body': response_body.getvalue(),
+ 'headers': self._headers,
+ 'error': False,
+ }
+ ok = retry.check_http_response_success(self._result['status_code'])
+ if not ok:
+ self._result['error'] = arvados.errors.HttpError(
+ self._result['status_code'],
+ self._headers.get('x-status-line', 'Error'))
except self.HTTP_ERRORS as e:
- _logger.debug("Request fail: GET %s => %s: %s",
- url, type(e), str(e))
- self.last_result = e
+ self._result = {
+ 'error': e,
+ }
+ ok = False
+ self._usable = ok != False  # still usable if ok is True or None
+ if self._result.get('status_code', None):
+ # The client worked well enough to get an HTTP status
+ # code, so presumably any problems are just on the
+ # server side and it's OK to reuse the client.
+ self._put_user_agent(curl)
else:
- self.last_result = result
- self.success_flag = retry.check_http_response_success(result)
- content = result.content
- _logger.info("%s response: %s bytes in %s msec (%.3f MiB/sec)",
- self.last_status(), len(content), t.msecs,
- (len(content)/(1024.0*1024))/t.secs if t.secs > 0 else 0)
- if self.success_flag:
- resp_md5 = hashlib.md5(content).hexdigest()
- if resp_md5 == locator.md5sum:
- return content
- _logger.warning("Checksum fail: md5(%s) = %s",
- url, resp_md5)
- return None
+ # Don't return this client to the pool, in case it's
+ # broken.
+ curl.close()
+ if not ok:
+ _logger.debug("Request fail: GET %s => %s: %s",
+ url, type(self._result['error']), str(self._result['error']))
+ return None
+ _logger.info("%s response: %s bytes in %s msec (%.3f MiB/sec)",
+ self._result['status_code'],
+ len(self._result['body']),
+ t.msecs,
+ (len(self._result['body'])/(1024.0*1024))/t.secs if t.secs > 0 else 0)
+ resp_md5 = hashlib.md5(self._result['body']).hexdigest()
+ if resp_md5 != locator.md5sum:
+ _logger.warning("Checksum fail: md5(%s) = %s",
+ url, resp_md5)
+ self._result['error'] = arvados.errors.HttpError(
+ 0, 'Checksum fail')
+ return None
+ return self._result['body']
def put(self, hash_s, body, timeout=None):
url = self.root + hash_s
_logger.debug("Request: PUT %s", url)
+ curl = self._get_user_agent()
try:
- result = self.session.put(url.encode('utf-8'),
- data=body,
- headers=self.put_headers,
- timeout=timeout)
+ self._headers = {}
+ body_reader = cStringIO.StringIO(body)
+ response_body = cStringIO.StringIO()
+ curl.setopt(pycurl.NOSIGNAL, 1)
+ curl.setopt(pycurl.OPENSOCKETFUNCTION, self._socket_open)
+ curl.setopt(pycurl.URL, url.encode('utf-8'))
+ # Using UPLOAD tells cURL to wait for a "go ahead" from the
+ # Keep server (in the form of an HTTP/1.1 "100 Continue"
+ # response) before sending the request body. This lets the
+ # server reject an invalid request, or refuse writes while it
+ # is read-only, without waiting for the client to send the
+ # entire block.
+ curl.setopt(pycurl.UPLOAD, True)
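+ # INFILESIZE lets cURL send a Content-Length header for the
+ # upload, and READFUNCTION supplies the body by reading from
+ # the in-memory buffer.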
+ curl.setopt(pycurl.INFILESIZE, len(body))
+ curl.setopt(pycurl.READFUNCTION, body_reader.read)
+ curl.setopt(pycurl.HTTPHEADER, [
+ '{}: {}'.format(k,v) for k,v in self.put_headers.iteritems()])
+ curl.setopt(pycurl.WRITEFUNCTION, response_body.write)
+ curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
+ self._setcurltimeouts(curl, timeout)
+ try:
+ curl.perform()
+ except Exception as e:
+ raise arvados.errors.HttpError(0, str(e))
+ self._result = {
+ 'status_code': curl.getinfo(pycurl.RESPONSE_CODE),
+ 'body': response_body.getvalue(),
+ 'headers': self._headers,
+ 'error': False,
+ }
+ ok = retry.check_http_response_success(self._result['status_code'])
+ if not ok:
+ self._result['error'] = arvados.errors.HttpError(
+ self._result['status_code'],
+ self._headers.get('x-status-line', 'Error'))
except self.HTTP_ERRORS as e:
+ self._result = {
+ 'error': e,
+ }
+ ok = False
+ self._usable = ok != False # still usable if ok is True or None
+ if self._result.get('status_code', None):
+ # Client is functional. See comment in get().
+ self._put_user_agent(curl)
+ else:
+ curl.close()
+ if not ok:
_logger.debug("Request fail: PUT %s => %s: %s",
- url, type(e), str(e))
- self.last_result = e
+ url, type(self._result['error']), str(self._result['error']))
+ return False
+ return True
+
+ def _setcurltimeouts(self, curl, timeouts):
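+ # timeouts may be falsy (no limits), a single number of seconds
+ # used for both phases, or a (connection_timeout, transfer_timeout)
+ # tuple; e.g. (20, 300) allows 20s to connect and 300s for the
+ # whole request.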
+ if not timeouts:
+ return
+ elif isinstance(timeouts, tuple):
+ conn_t, xfer_t = timeouts
+ else:
+ conn_t, xfer_t = (timeouts, timeouts)
+ curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(conn_t*1000))
+ curl.setopt(pycurl.TIMEOUT_MS, int(xfer_t*1000))
+
+ def _headerfunction(self, header_line):
+ header_line = header_line.decode('iso-8859-1')
+ if ':' in header_line:
+ name, value = header_line.split(':', 1)
+ name = name.strip().lower()
+ value = value.strip()
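+ # No colon, but headers have already been seen: this is a
+ # folded (continuation) line, so append it to the previous
+ # header's value.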
+ elif self._headers:
+ name = self._lastheadername
+ value = self._headers[name] + ' ' + header_line.strip()
+ elif header_line.startswith('HTTP/'):
+ name = 'x-status-line'
+ value = header_line
else:
- self.last_result = result
- self.success_flag = retry.check_http_response_success(result)
- return self.success_flag
+ _logger.error("Unexpected header line: %s", header_line)
+ return
+ self._lastheadername = name
+ self._headers[name] = value
+ # Returning None implies all bytes were written
class KeepWriterThread(threading.Thread):
self.args['data_hash'],
self.args['data'],
timeout=self.args.get('timeout', None)))
- status = self.service.last_status()
+ result = self.service.last_result()
if self._success:
- result = self.service.last_result
_logger.debug("KeepWriterThread %s succeeded %s+%i %s",
str(threading.current_thread()),
self.args['data_hash'],
# we're talking to a proxy or other backend that
# stores to multiple copies for us.
try:
- replicas_stored = int(result.headers['x-keep-replicas-stored'])
+ replicas_stored = int(result['headers']['x-keep-replicas-stored'])
except (KeyError, ValueError):
replicas_stored = 1
- limiter.save_response(result.content.strip(), replicas_stored)
- elif status is not None:
+ limiter.save_response(result['body'].strip(), replicas_stored)
+ elif result.get('status_code', None):
_logger.debug("Request fail: PUT %s => %s %s",
- self.args['data_hash'], status,
- self.service.last_result.content)
+ self.args['data_hash'],
+ result['status_code'],
+ result['body'])
def __init__(self, api_client=None, proxy=None,
The default number of times to retry failed requests.
This will be used as the default num_retries value when get() and
put() are called. Default 0.
-
- :session:
- The requests.Session object to use for get() and put() requests.
- Will create one if not specified.
"""
self.lock = threading.Lock()
if proxy is None:
self.block_cache = block_cache if block_cache else KeepBlockCache()
self.timeout = timeout
self.proxy_timeout = proxy_timeout
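+ # Shared pool of reusable pycurl handles, handed to each
+ # KeepService so persistent connections can be reused across
+ # requests.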
+ self._user_agent_pool = Queue.LifoQueue()
if local_store:
self.local_store = local_store
self.put = self.local_store_put
else:
self.num_retries = num_retries
- self.session = session if session is not None else requests.Session()
if proxy:
if not proxy.endswith('/'):
proxy += '/'
'uuid': 'proxy',
'_service_root': proxy,
}]
+ self._writable_services = self._keep_services
self.using_proxy = True
self._static_services_list = True
else:
self.api_token = api_client.api_token
self._gateway_services = {}
self._keep_services = None
+ self._writable_services = None
self.using_proxy = None
self._static_services_list = False
# Precompute the base URI for each service.
for r in accessible:
- r['_service_root'] = "{}://[{}]:{:d}/".format(
+ host = r['service_host']
+ if not host.startswith('[') and host.find(':') >= 0:
+ # IPv6 URIs must be formatted like http://[::1]:80/...
+ host = '[' + host + ']'
+ r['_service_root'] = "{}://{}:{:d}/".format(
'https' if r['service_ssl_flag'] else 'http',
- r['service_host'],
+ host,
r['service_port'])
# Gateway services are only used when specified by UUID,
self._keep_services = [
ks for ks in accessible
if ks.get('service_type') in ['disk', 'proxy']]
+ self._writable_services = [
+ ks for ks in accessible
+ if (ks.get('service_type') in ['disk', 'proxy']) and (ks.get('read_only') != True)]
_logger.debug(str(self._keep_services))
self.using_proxy = any(ks.get('service_type') == 'proxy'
"""
return hashlib.md5(data_hash + service_uuid[-15:]).hexdigest()
- def weighted_service_roots(self, locator, force_rebuild=False):
+ def weighted_service_roots(self, locator, force_rebuild=False, need_writable=False):
"""Return an array of Keep service endpoints, in the order in
which they should be probed when reading or writing data with
the given hash+hints.
# Sort the available local services by weight (heaviest first)
# for this locator, and return their service_roots (base URIs)
# in that order.
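+ # When writing (need_writable), restrict the probe order to
+ # services that accept writes; reads may use any disk or proxy
+ # service.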
+ use_services = self._keep_services
+ if need_writable:
+ use_services = self._writable_services
sorted_roots.extend([
svc['_service_root'] for svc in sorted(
- self._keep_services,
+ use_services,
reverse=True,
key=lambda svc: self._service_weight(locator.md5sum, svc['uuid']))])
_logger.debug("{}: {}".format(locator, sorted_roots))
return sorted_roots
- def map_new_services(self, roots_map, locator, force_rebuild, **headers):
+ def map_new_services(self, roots_map, locator, force_rebuild, need_writable, **headers):
# roots_map is a dictionary, mapping Keep service root strings
# to KeepService objects. Poll for Keep services, and add any
# new ones to roots_map. Return the current list of local
# root strings.
headers.setdefault('Authorization', "OAuth2 %s" % (self.api_token,))
- local_roots = self.weighted_service_roots(locator, force_rebuild)
+ local_roots = self.weighted_service_roots(locator, force_rebuild, need_writable)
for root in local_roots:
if root not in roots_map:
- roots_map[root] = self.KeepService(root, self.session, **headers)
+ roots_map[root] = self.KeepService(
+ root, self._user_agent_pool, **headers)
return local_roots
@staticmethod
def get_from_cache(self, loc):
"""Fetch a block only if is in the cache, otherwise return None."""
slot = self.block_cache.get(loc)
- if slot.ready.is_set():
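+ # block_cache.get() returns None on a miss; only report a hit
+ # when the slot exists and its content is ready.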
+ if slot is not None and slot.ready.is_set():
return slot.get()
else:
return None
self._gateway_services.get(hint[2:])
)])
# Map root URLs to their KeepService objects.
- roots_map = {root: self.KeepService(root, self.session) for root in hint_roots}
+ roots_map = {
+ root: self.KeepService(root, self._user_agent_pool)
+ for root in hint_roots
+ }
# See #3147 for a discussion of the loop implementation. Highlights:
# * Refresh the list of Keep services after each failure, in case
try:
sorted_roots = self.map_new_services(
roots_map, locator,
- force_rebuild=(tries_left < num_retries))
+ force_rebuild=(tries_left < num_retries),
+ need_writable=False)
except Exception as error:
loop.save_result(error)
continue
# Q: Including 403 is necessary for the Keep tests to continue
# passing, but maybe they should expect KeepReadError instead?
not_founds = sum(1 for key in sorted_roots
- if roots_map[key].last_status() in {403, 404, 410})
- service_errors = ((key, roots_map[key].last_result)
+ if roots_map[key].last_result().get('status_code', None) in {403, 404, 410})
+ service_errors = ((key, roots_map[key].last_result()['error'])
for key in sorted_roots)
if not roots_map:
raise arvados.errors.KeepReadError(
try:
local_roots = self.map_new_services(
roots_map, locator,
- force_rebuild=(tries_left < num_retries), **headers)
+ force_rebuild=(tries_left < num_retries), need_writable=True, **headers)
except Exception as error:
loop.save_result(error)
continue
"failed to write {}: no Keep services available ({})".format(
data_hash, loop.last_result()))
else:
- service_errors = ((key, roots_map[key].last_result)
+ service_errors = ((key, roots_map[key].last_result()['error'])
for key in local_roots
- if not roots_map[key].success_flag)
+ if roots_map[key].last_result()['error'])
raise arvados.errors.KeepWriteError(
"failed to write {} (wanted {} copies but wrote {})".format(
data_hash, copies, thread_limiter.done()), service_errors, label="service")