proxy='', local_store='')
def test_KeepBasicRWTest(self):
+ self.assertEqual(0, self.keep_client.upload_counter.get())
foo_locator = self.keep_client.put('foo')
self.assertRegexpMatches(
foo_locator,
'^acbd18db4cc2f85cedef654fccc4a4d8\+3',
'wrong md5 hash from Keep.put("foo"): ' + foo_locator)
+
+ # 6 bytes because 'foo' (3 bytes) was uploaded with 2 copies
+ self.assertEqual(6, self.keep_client.upload_counter.get())
+
+ self.assertEqual(0, self.keep_client.download_counter.get())
self.assertEqual(self.keep_client.get(foo_locator),
'foo',
'wrong content from Keep.get(md5("foo"))')
+ self.assertEqual(3, self.keep_client.download_counter.get())
def test_KeepBinaryRWTest(self):
blob_str = '\xff\xfe\xf7\x00\x01\x02'
blob_str,
'wrong content from Keep.get(md5(<binarydata>))')
+ @unittest.skip("unreliable test - please fix and close #8752")
def test_KeepSingleCopyRWTest(self):
blob_str = '\xff\xfe\xfd\xfc\x00\x01\x02\x03'
blob_locator = self.keep_client.put(blob_str, copies=1)
# Must be a string type
self.keep_client.put({})
+ def test_KeepHeadTest(self):
+ locator = self.keep_client.put('test_head')
+ self.assertRegexpMatches(
+ locator,
+ '^b9a772c7049325feb7130fff1f8333e9\+9',
+ 'wrong md5 hash from Keep.put for "test_head": ' + locator)
+ self.assertEqual(True, self.keep_client.head(locator))
+ self.assertEqual(self.keep_client.get(locator),
+ 'test_head',
+ 'wrong content from Keep.get for "test_head"')
+
class KeepPermissionTestCase(run_test_server.TestCaseWithServers):
MAIN_SERVER = {}
KEEP_SERVER = {'blob_signing_key': 'abcdefghijk0123456789',
super(KeepProxyTestCase, self).tearDown()
def test_KeepProxyTest1(self):
- # Will use ARVADOS_KEEP_PROXY environment variable that is set by
- # setUpClass().
+ # Will use ARVADOS_KEEP_SERVICES environment variable that
+ # is set by setUpClass().
keep_client = arvados.KeepClient(api_client=self.api_client,
local_store='')
baz_locator = keep_client.put('baz')
'wrong content from Keep.get(md5("baz2"))')
self.assertTrue(keep_client.using_proxy)
+ def test_KeepProxyTestMultipleURIs(self):
+ # Test using ARVADOS_KEEP_SERVICES env var overriding any
+ # existing proxy setting and setting multiple proxies
+ arvados.config.settings()['ARVADOS_KEEP_SERVICES'] = 'http://10.0.0.1 https://foo.example.org:1234/'
+ keep_client = arvados.KeepClient(api_client=self.api_client,
+ local_store='')
+ uris = [x['_service_root'] for x in keep_client._keep_services]
+ self.assertEqual(uris, ['http://10.0.0.1/',
+ 'https://foo.example.org:1234/'])
+
+ def test_KeepProxyTestInvalidURI(self):
+ arvados.config.settings()['ARVADOS_KEEP_SERVICES'] = 'bad.uri.org'
+ with self.assertRaises(arvados.errors.ArgumentError):
+ keep_client = arvados.KeepClient(api_client=self.api_client,
+ local_store='')
+
class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
def get_service_roots(self, api_client):
mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),
int(arvados.KeepClient.DEFAULT_TIMEOUT[0]*1000))
self.assertEqual(
- mock.responses[0].getopt(pycurl.TIMEOUT_MS),
- int(arvados.KeepClient.DEFAULT_TIMEOUT[1]*1000))
+ mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),
+ int(arvados.KeepClient.DEFAULT_TIMEOUT[1]))
+ self.assertEqual(
+ mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),
+ int(arvados.KeepClient.DEFAULT_TIMEOUT[2]))
def test_put_timeout(self):
api_client = self.mock_keep_services(count=1)
mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),
int(arvados.KeepClient.DEFAULT_TIMEOUT[0]*1000))
self.assertEqual(
- mock.responses[0].getopt(pycurl.TIMEOUT_MS),
- int(arvados.KeepClient.DEFAULT_TIMEOUT[1]*1000))
+ mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),
+ int(arvados.KeepClient.DEFAULT_TIMEOUT[1]))
+ self.assertEqual(
+ mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),
+ int(arvados.KeepClient.DEFAULT_TIMEOUT[2]))
+
+ def test_head_timeout(self):
+ api_client = self.mock_keep_services(count=1)
+ force_timeout = socket.timeout("timed out")
+ with tutil.mock_keep_responses(force_timeout, 0) as mock:
+ keep_client = arvados.KeepClient(api_client=api_client)
+ with self.assertRaises(arvados.errors.KeepReadError):
+ keep_client.head('ffffffffffffffffffffffffffffffff')
+ self.assertEqual(
+ mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),
+ int(arvados.KeepClient.DEFAULT_TIMEOUT[0]*1000))
+ self.assertEqual(
+ mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),
+ int(arvados.KeepClient.DEFAULT_TIMEOUT[1]))
+ self.assertEqual(
+ mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),
+ int(arvados.KeepClient.DEFAULT_TIMEOUT[2]))
def test_proxy_get_timeout(self):
api_client = self.mock_keep_services(service_type='proxy', count=1)
mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),
int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[0]*1000))
self.assertEqual(
- mock.responses[0].getopt(pycurl.TIMEOUT_MS),
- int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[1]*1000))
+ mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),
+ int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[1]))
+ self.assertEqual(
+ mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),
+ int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[2]))
+
+ def test_proxy_head_timeout(self):
+ api_client = self.mock_keep_services(service_type='proxy', count=1)
+ force_timeout = socket.timeout("timed out")
+ with tutil.mock_keep_responses(force_timeout, 0) as mock:
+ keep_client = arvados.KeepClient(api_client=api_client)
+ with self.assertRaises(arvados.errors.KeepReadError):
+ keep_client.head('ffffffffffffffffffffffffffffffff')
+ self.assertEqual(
+ mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),
+ int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[0]*1000))
+ self.assertEqual(
+ mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),
+ int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[1]))
+ self.assertEqual(
+ mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),
+ int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[2]))
def test_proxy_put_timeout(self):
api_client = self.mock_keep_services(service_type='proxy', count=1)
mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),
int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[0]*1000))
self.assertEqual(
- mock.responses[0].getopt(pycurl.TIMEOUT_MS),
- int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[1]*1000))
+ mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),
+ int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[1]))
+ self.assertEqual(
+ mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),
+ int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[2]))
def check_no_services_error(self, verb, exc_class):
api_client = mock.MagicMock(name='api_client')
def test_get_error_with_no_services(self):
self.check_no_services_error('get', arvados.errors.KeepReadError)
+ def test_head_error_with_no_services(self):
+ self.check_no_services_error('head', arvados.errors.KeepReadError)
+
def test_put_error_with_no_services(self):
self.check_no_services_error('put', arvados.errors.KeepWriteError)
def test_get_error_reflects_last_retry(self):
self.check_errors_from_last_retry('get', arvados.errors.KeepReadError)
+ def test_head_error_reflects_last_retry(self):
+ self.check_errors_from_last_retry('head', arvados.errors.KeepReadError)
+
def test_put_error_reflects_last_retry(self):
self.check_errors_from_last_retry('put', arvados.errors.KeepWriteError)
def test_put_error_does_not_include_successful_puts(self):
data = 'partial failure test'
- data_loc = '{}+{}'.format(hashlib.md5(data).hexdigest(), len(data))
+ data_loc = tutil.str_keep_locator(data)
api_client = self.mock_keep_services(count=3)
with tutil.mock_keep_responses(data_loc, 200, 500, 500) as req_mock, \
self.assertRaises(arvados.errors.KeepWriteError) as exc_check:
def test_proxy_put_with_no_writable_services(self):
data = 'test with no writable services'
- data_loc = '{}+{}'.format(hashlib.md5(data).hexdigest(), len(data))
+ data_loc = tutil.str_keep_locator(data)
api_client = self.mock_keep_services(service_type='proxy', read_only=True, count=1)
with tutil.mock_keep_responses(data_loc, 200, 500, 500) as req_mock, \
self.assertRaises(arvados.errors.KeepWriteError) as exc_check:
self.assertEqual(True, ("no Keep services available" in str(exc_check.exception)))
self.assertEqual(0, len(exc_check.exception.request_errors()))
+ def test_oddball_service_get(self):
+ body = 'oddball service get'
+ api_client = self.mock_keep_services(service_type='fancynewblobstore')
+ with tutil.mock_keep_responses(body, 200):
+ keep_client = arvados.KeepClient(api_client=api_client)
+ actual = keep_client.get(tutil.str_keep_locator(body))
+ self.assertEqual(body, actual)
+
+ def test_oddball_service_put(self):
+ body = 'oddball service put'
+ pdh = tutil.str_keep_locator(body)
+ api_client = self.mock_keep_services(service_type='fancynewblobstore')
+ with tutil.mock_keep_responses(pdh, 200):
+ keep_client = arvados.KeepClient(api_client=api_client)
+ actual = keep_client.put(body, copies=1)
+ self.assertEqual(pdh, actual)
+
+ def test_oddball_service_writer_count(self):
+ body = 'oddball service writer count'
+ pdh = tutil.str_keep_locator(body)
+ api_client = self.mock_keep_services(service_type='fancynewblobstore',
+ count=4)
+ headers = {'x-keep-replicas-stored': 3}
+ with tutil.mock_keep_responses(pdh, 200, 418, 418, 418,
+ **headers) as req_mock:
+ keep_client = arvados.KeepClient(api_client=api_client)
+ actual = keep_client.put(body, copies=2)
+ self.assertEqual(pdh, actual)
+ self.assertEqual(1, req_mock.call_count)
+
@tutil.skip_sleep
class KeepClientRendezvousTestCase(unittest.TestCase, tutil.ApiClientMock):
self._test_probe_order_against_reference_set(
lambda i: self.keep_client.get(self.hashes[i], num_retries=1))
+ def test_head_probe_order_against_reference_set(self):
+ self._test_probe_order_against_reference_set(
+ lambda i: self.keep_client.head(self.hashes[i], num_retries=1))
+
def test_put_probe_order_against_reference_set(self):
# copies=1 prevents the test from being sensitive to races
# between writer threads.
got_order = [
re.search(r'//\[?keep0x([0-9a-f]+)', resp.getopt(pycurl.URL)).group(1)
for resp in mock.responses]
+ # With T threads racing to make requests, the position
+ # of a given server in the sequence of HTTP requests
+ # (got_order) cannot be more than T-1 positions
+ # earlier than that server's position in the reference
+ # probe sequence (expected_order).
+ #
+ # Loop invariant: we have accounted for `pos` expected
+ # probes, either by seeing them in `got_order` or by
+ # putting them in `pending` in the hope of seeing them
+ # later. As long as len(pending) < T, we haven't
+ # started a request too early.
+ pending = []
for pos, expected in enumerate(self.expected_order[i]*3):
- # With C threads racing to make requests, the
- # position of a given server in the sequence of
- # HTTP requests (got_order) should be within C-1
- # positions of that server's position in the
- # reference probe sequence (expected_order).
- close_enough = False
- for diff in range(1-copies, copies):
- if 0 <= pos+diff < len(got_order):
- if expected == got_order[pos+diff]:
- close_enough = True
- self.assertEqual(
- True, close_enough,
- "With copies={}, got {}, expected {}".format(
- copies, repr(got_order), repr(self.expected_order[i]*3)))
+ got = got_order[pos-len(pending)]
+ while got in pending:
+ del pending[pending.index(got)]
+ got = got_order[pos-len(pending)]
+ if got != expected:
+ pending.append(expected)
+ self.assertLess(
+ len(pending), copies,
+ "pending={}, with copies={}, got {}, expected {}".format(
+ pending, copies, repr(got_order), repr(self.expected_order[i]*3)))
def test_probe_waste_adding_one_server(self):
hashes = [
def check_64_zeros_error_order(self, verb, exc_class):
data = '0' * 64
if verb == 'get':
- data = hashlib.md5(data).hexdigest() + '+1234'
+ data = tutil.str_keep_locator(data)
# Arbitrary port number:
aport = random.randint(1024,65535)
api_client = self.mock_keep_services(service_port=aport, count=self.services)
class KeepClientTimeout(unittest.TestCase, tutil.ApiClientMock):
- DATA = 'x' * 2**10
+ # BANDWIDTH_LOW_LIM must be less than len(DATA) so we can transfer
+ # 1s worth of data and then trigger bandwidth errors before running
+ # out of data.
+ DATA = 'x'*2**11
+ BANDWIDTH_LOW_LIM = 1024
+ TIMEOUT_TIME = 1.0
class assertTakesBetween(unittest.TestCase):
def __init__(self, tmin, tmax):
self.t0 = time.time()
def __exit__(self, *args, **kwargs):
- self.assertGreater(time.time() - self.t0, self.tmin)
- self.assertLess(time.time() - self.t0, self.tmax)
+ # Round times to milliseconds, like CURL. Otherwise, we
+ # fail when CURL reaches a 1s timeout at 0.9998s.
+ delta = round(time.time() - self.t0, 3)
+ self.assertGreaterEqual(delta, self.tmin)
+ self.assertLessEqual(delta, self.tmax)
+
+ class assertTakesGreater(unittest.TestCase):
+ def __init__(self, tmin):
+ self.tmin = tmin
+
+ def __enter__(self):
+ self.t0 = time.time()
+
+ def __exit__(self, *args, **kwargs):
+ delta = round(time.time() - self.t0, 3)
+ self.assertGreaterEqual(delta, self.tmin)
def setUp(self):
sock = socket.socket()
def tearDown(self):
self.server.shutdown()
- def keepClient(self, timeouts=(0.1, 1.0)):
+ def keepClient(self, timeouts=(0.1, TIMEOUT_TIME, BANDWIDTH_LOW_LIM)):
return arvados.KeepClient(
api_client=self.api_client,
timeout=timeouts)
)
with self.assertTakesBetween(0.1, 0.5):
with self.assertRaises(arvados.errors.KeepWriteError):
- self.keepClient((0.1, 1)).put(self.DATA, copies=1, num_retries=0)
+ self.keepClient().put(self.DATA, copies=1, num_retries=0)
+
+ def test_low_bandwidth_no_delays_success(self):
+ self.server.setbandwidth(2*self.BANDWIDTH_LOW_LIM)
+ kc = self.keepClient()
+ loc = kc.put(self.DATA, copies=1, num_retries=0)
+ self.assertEqual(self.DATA, kc.get(loc, num_retries=0))
+
+ def test_too_low_bandwidth_no_delays_failure(self):
+ # Check that lessening bandwidth corresponds to failing
+ kc = self.keepClient()
+ loc = kc.put(self.DATA, copies=1, num_retries=0)
+ self.server.setbandwidth(0.5*self.BANDWIDTH_LOW_LIM)
+ with self.assertTakesGreater(self.TIMEOUT_TIME):
+ with self.assertRaises(arvados.errors.KeepReadError) as e:
+ kc.get(loc, num_retries=0)
+ with self.assertTakesGreater(self.TIMEOUT_TIME):
+ with self.assertRaises(arvados.errors.KeepWriteError):
+ kc.put(self.DATA, copies=1, num_retries=0)
+
+ def test_low_bandwidth_with_server_response_delay_failure(self):
+ kc = self.keepClient()
+ loc = kc.put(self.DATA, copies=1, num_retries=0)
+ self.server.setbandwidth(self.BANDWIDTH_LOW_LIM)
+ self.server.setdelays(response=self.TIMEOUT_TIME)
+ with self.assertTakesGreater(self.TIMEOUT_TIME):
+ with self.assertRaises(arvados.errors.KeepReadError) as e:
+ kc.get(loc, num_retries=0)
+ with self.assertTakesGreater(self.TIMEOUT_TIME):
+ with self.assertRaises(arvados.errors.KeepWriteError):
+ kc.put(self.DATA, copies=1, num_retries=0)
+ with self.assertTakesGreater(self.TIMEOUT_TIME):
+ with self.assertRaises(arvados.errors.KeepReadError) as e:
+ kc.head(loc, num_retries=0)
+
+ def test_low_bandwidth_with_server_mid_delay_failure(self):
+ kc = self.keepClient()
+ loc = kc.put(self.DATA, copies=1, num_retries=0)
+ self.server.setbandwidth(self.BANDWIDTH_LOW_LIM)
+ self.server.setdelays(mid_write=self.TIMEOUT_TIME, mid_read=self.TIMEOUT_TIME)
+ with self.assertTakesGreater(self.TIMEOUT_TIME):
+ with self.assertRaises(arvados.errors.KeepReadError) as e:
+ kc.get(loc, num_retries=0)
+ with self.assertTakesGreater(self.TIMEOUT_TIME):
+ with self.assertRaises(arvados.errors.KeepWriteError):
+ kc.put(self.DATA, copies=1, num_retries=0)
def test_timeout_slow_request(self):
- self.server.setdelays(request=0.2)
- self._test_200ms()
+ loc = self.keepClient().put(self.DATA, copies=1, num_retries=0)
+ self.server.setdelays(request=.2)
+ self._test_connect_timeout_under_200ms(loc)
+ self.server.setdelays(request=2)
+ self._test_response_timeout_under_2s(loc)
def test_timeout_slow_response(self):
- self.server.setdelays(response=0.2)
- self._test_200ms()
+ loc = self.keepClient().put(self.DATA, copies=1, num_retries=0)
+ self.server.setdelays(response=.2)
+ self._test_connect_timeout_under_200ms(loc)
+ self.server.setdelays(response=2)
+ self._test_response_timeout_under_2s(loc)
def test_timeout_slow_response_body(self):
- self.server.setdelays(response_body=0.2)
- self._test_200ms()
-
- def _test_200ms(self):
- """Connect should be t<100ms, request should be 200ms <= t < 300ms"""
+ loc = self.keepClient().put(self.DATA, copies=1, num_retries=0)
+ self.server.setdelays(response_body=.2)
+ self._test_connect_timeout_under_200ms(loc)
+ self.server.setdelays(response_body=2)
+ self._test_response_timeout_under_2s(loc)
+ def _test_connect_timeout_under_200ms(self, loc):
# Allow 100ms to connect, then 1s for response. Everything
# should work, and everything should take at least 200ms to
# return.
- kc = self.keepClient((.1, 1))
+ kc = self.keepClient(timeouts=(.1, 1))
with self.assertTakesBetween(.2, .3):
- loc = kc.put(self.DATA, copies=1, num_retries=0)
+ kc.put(self.DATA, copies=1, num_retries=0)
with self.assertTakesBetween(.2, .3):
self.assertEqual(self.DATA, kc.get(loc, num_retries=0))
- # Allow 1s to connect, then 100ms for response. Nothing should
- # work, and everything should take at least 100ms to return.
- kc = self.keepClient((1, .1))
- with self.assertTakesBetween(.1, .2):
+ def _test_response_timeout_under_2s(self, loc):
+ # Allow 10s to connect, then 1s for response. Nothing should
+ # work, and everything should take at least 1s to return.
+ kc = self.keepClient(timeouts=(10, 1))
+ with self.assertTakesBetween(1, 9):
with self.assertRaises(arvados.errors.KeepReadError):
kc.get(loc, num_retries=0)
- with self.assertTakesBetween(.1, .2):
+ with self.assertTakesBetween(1, 9):
with self.assertRaises(arvados.errors.KeepWriteError):
kc.put(self.DATA, copies=1, num_retries=0)
self.assertEqual('foo', self.keepClient.get(locator))
self.assertEqual(self.gateway_roots[0]+locator,
MockCurl.return_value.getopt(pycurl.URL))
+ self.assertEqual(True, self.keepClient.head(locator))
@mock.patch('pycurl.Curl')
def test_get_with_gateway_hints_in_order(self, MockCurl):
mocks[i].getopt(pycurl.URL),
r'keep0x')
+ @mock.patch('pycurl.Curl')
+ def test_head_with_gateway_hints_in_order(self, MockCurl):
+ gateways = 4
+ disks = 3
+ mocks = [
+ tutil.FakeCurl.make(code=404, body='')
+ for _ in range(gateways+disks)
+ ]
+ MockCurl.side_effect = tutil.queue_with(mocks)
+ self.mock_disks_and_gateways(gateways=gateways, disks=disks)
+ locator = '+'.join(['acbd18db4cc2f85cedef654fccc4a4d8+3'] +
+ ['K@'+gw['uuid'] for gw in self.gateways])
+ with self.assertRaises(arvados.errors.NotFoundError):
+ self.keepClient.head(locator)
+ # Gateways are tried first, in the order given.
+ for i, root in enumerate(self.gateway_roots):
+ self.assertEqual(root+locator,
+ mocks[i].getopt(pycurl.URL))
+ # Disk services are tried next.
+ for i in range(gateways, gateways+disks):
+ self.assertRegexpMatches(
+ mocks[i].getopt(pycurl.URL),
+ r'keep0x')
+
@mock.patch('pycurl.Curl')
def test_get_with_remote_proxy_hint(self, MockCurl):
MockCurl.return_value = tutil.FakeCurl.make(
self.assertEqual('https://keep.xyzzy.arvadosapi.com/'+locator,
MockCurl.return_value.getopt(pycurl.URL))
+ @mock.patch('pycurl.Curl')
+ def test_head_with_remote_proxy_hint(self, MockCurl):
+ MockCurl.return_value = tutil.FakeCurl.make(
+ code=200, body='foo', headers={'Content-Length': 3})
+ self.mock_disks_and_gateways()
+ locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3+K@xyzzy'
+ self.assertEqual(True, self.keepClient.head(locator))
+ self.assertEqual('https://keep.xyzzy.arvadosapi.com/'+locator,
+ MockCurl.return_value.getopt(pycurl.URL))
+
class KeepClientRetryTestMixin(object):
# Testing with a local Keep store won't exercise the retry behavior.
with self.TEST_PATCHER(self.DEFAULT_EXPECT, 500, 200):
self.check_success(num_retries=3)
+ def test_exception_then_success(self):
+ with self.TEST_PATCHER(self.DEFAULT_EXPECT, Exception('mock err'), 200):
+ self.check_success(num_retries=3)
+
def test_no_default_retry(self):
with self.TEST_PATCHER(self.DEFAULT_EXPECT, 500, 200):
self.check_exception()
(self.DEFAULT_EXPECT, 200)):
self.check_success(locator=self.HINTED_LOCATOR)
+@tutil.skip_sleep
+class KeepClientRetryHeadTestCase(KeepClientRetryTestMixin, unittest.TestCase):
+ DEFAULT_EXPECT = True
+ DEFAULT_EXCEPTION = arvados.errors.KeepReadError
+ HINTED_LOCATOR = KeepClientRetryTestMixin.TEST_LOCATOR + '+K@xyzzy'
+ TEST_PATCHER = staticmethod(tutil.mock_keep_responses)
+
+ def run_method(self, locator=KeepClientRetryTestMixin.TEST_LOCATOR,
+ *args, **kwargs):
+ return self.new_client().head(locator, *args, **kwargs)
+
+ def test_specific_exception_when_not_found(self):
+ with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 200):
+ self.check_exception(arvados.errors.NotFoundError, num_retries=3)
+
+ def test_general_exception_with_mixed_errors(self):
+ # head should raise NotFoundError only when a high threshold
+ # of servers affirmatively report that the block was not found.
+ # This test rigs up 50/50 disagreement between two servers, and
+ # checks that it does not become a NotFoundError.
+ client = self.new_client()
+ with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 500):
+ with self.assertRaises(arvados.errors.KeepReadError) as exc_check:
+ client.head(self.HINTED_LOCATOR)
+ self.assertNotIsInstance(
+ exc_check.exception, arvados.errors.NotFoundError,
+ "mixed errors raised NotFoundError")
+
+ def test_hint_server_can_succeed_without_retries(self):
+ with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 200, 500):
+ self.check_success(locator=self.HINTED_LOCATOR)
+
+ def test_try_next_server_after_timeout(self):
+ with tutil.mock_keep_responses(
+ (socket.timeout("timed out"), 200),
+ (self.DEFAULT_EXPECT, 200)):
+ self.check_success(locator=self.HINTED_LOCATOR)
@tutil.skip_sleep
class KeepClientRetryPutTestCase(KeepClientRetryTestMixin, unittest.TestCase):
def test_do_not_send_multiple_copies_to_same_server(self):
with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 200):
self.check_exception(copies=2, num_retries=3)
+
+
+class AvoidOverreplication(unittest.TestCase, tutil.ApiClientMock):
+
+ class FakeKeepService(object):
+ def __init__(self, delay, will_succeed=False, will_raise=None, replicas=1):
+ self.delay = delay
+ self.will_succeed = will_succeed
+ self.will_raise = will_raise
+ self._result = {}
+ self._result['headers'] = {}
+ self._result['headers']['x-keep-replicas-stored'] = str(replicas)
+ self._result['body'] = 'foobar'
+
+ def put(self, data_hash, data, timeout):
+ time.sleep(self.delay)
+ if self.will_raise is not None:
+ raise self.will_raise
+ return self.will_succeed
+
+ def last_result(self):
+ if self.will_succeed:
+ return self._result
+
+ def finished(self):
+ return False
+
+ def setUp(self):
+ self.copies = 3
+ self.pool = arvados.KeepClient.KeepWriterThreadPool(
+ data = 'foo',
+ data_hash = 'acbd18db4cc2f85cedef654fccc4a4d8+3',
+ max_service_replicas = self.copies,
+ copies = self.copies
+ )
+
+ def test_only_write_enough_on_success(self):
+ for i in range(10):
+ ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
+ self.pool.add_task(ks, None)
+ self.pool.join()
+ self.assertEqual(self.pool.done(), self.copies)
+
+ def test_only_write_enough_on_partial_success(self):
+ for i in range(5):
+ ks = self.FakeKeepService(delay=i/10.0, will_succeed=False)
+ self.pool.add_task(ks, None)
+ ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
+ self.pool.add_task(ks, None)
+ self.pool.join()
+ self.assertEqual(self.pool.done(), self.copies)
+
+ def test_only_write_enough_when_some_crash(self):
+ for i in range(5):
+ ks = self.FakeKeepService(delay=i/10.0, will_raise=Exception())
+ self.pool.add_task(ks, None)
+ ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
+ self.pool.add_task(ks, None)
+ self.pool.join()
+ self.assertEqual(self.pool.done(), self.copies)
+
+ def test_fail_when_too_many_crash(self):
+ for i in range(self.copies+1):
+ ks = self.FakeKeepService(delay=i/10.0, will_raise=Exception())
+ self.pool.add_task(ks, None)
+ for i in range(self.copies-1):
+ ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
+ self.pool.add_task(ks, None)
+ self.pool.join()
+ self.assertEqual(self.pool.done(), self.copies-1)
+
+
+@tutil.skip_sleep
+class RetryNeedsMultipleServices(unittest.TestCase, tutil.ApiClientMock):
+ # Test put()s that need two distinct servers to succeed, possibly
+ # requiring multiple passes through the retry loop.
+
+ def setUp(self):
+ self.api_client = self.mock_keep_services(count=2)
+ self.keep_client = arvados.KeepClient(api_client=self.api_client)
+
+ def test_success_after_exception(self):
+ with tutil.mock_keep_responses(
+ 'acbd18db4cc2f85cedef654fccc4a4d8+3',
+ Exception('mock err'), 200, 200) as req_mock:
+ self.keep_client.put('foo', num_retries=1, copies=2)
+ self.assertEqual(3, req_mock.call_count)
+
+ def test_success_after_retryable_error(self):
+ with tutil.mock_keep_responses(
+ 'acbd18db4cc2f85cedef654fccc4a4d8+3',
+ 500, 200, 200) as req_mock:
+ self.keep_client.put('foo', num_retries=1, copies=2)
+ self.assertEqual(3, req_mock.call_count)
+
+ def test_fail_after_final_error(self):
+ # First retry loop gets a 200 (can't achieve replication by
+ # storing again on that server) and a 400 (can't retry that
+ # server at all), so we shouldn't try a third request.
+ with tutil.mock_keep_responses(
+ 'acbd18db4cc2f85cedef654fccc4a4d8+3',
+ 200, 400, 200) as req_mock:
+ with self.assertRaises(arvados.errors.KeepWriteError):
+ self.keep_client.put('foo', num_retries=1, copies=2)
+ self.assertEqual(2, req_mock.call_count)