from __future__ import division
-from past.utils import old_div
from builtins import object
import logging
hi = len(data_locators)
lo = 0
- i = int(old_div((hi + lo), 2))
+ i = (hi + lo) // 2
block_size = data_locators[i].range_size
block_start = data_locators[i].range_start
block_end = block_start + block_size
lo = i
else:
hi = i
- i = int(old_div((hi + lo), 2))
+ i = (hi + lo) // 2
block_size = data_locators[i].range_size
block_start = data_locators[i].range_start
block_end = block_start + block_size
from future import standard_library
standard_library.install_aliases()
from builtins import range
-from past.utils import old_div
from builtins import object
import functools
import os
"""
self._writers.remove(writer)
- if flush or self.size() > old_div(config.KEEP_BLOCK_SIZE, 2):
+ if flush or self.size() > config.KEEP_BLOCK_SIZE // 2:
# File writer closed, not small enough for repacking
self.flush()
elif self.closed():
standard_library.install_aliases()
from past.builtins import basestring
from builtins import object
-from past.utils import old_div
import argparse
import contextlib
import getpass
return "\r{}: {}M / {}M {:.1%} ".format(
obj_uuid,
bytes_written >> 20, bytes_expected >> 20,
- old_div(float(bytes_written), bytes_expected))
+ float(bytes_written) / bytes_expected)
else:
return "\r{}: {} ".format(obj_uuid, bytes_written)
from __future__ import print_function
from __future__ import division
-from past.utils import old_div
import argparse
import sys
return parser.parse_args(args)
def size_formatter(coll_file):
- return "{:>10}".format(old_div((coll_file.size() + 1023), 1024))
+ return "{:>10}".format((coll_file.size() + 1023) // 1024)
def name_formatter(coll_file):
return "{}/{}".format(coll_file.stream_name(), coll_file.name)
from __future__ import print_function
from __future__ import division
-from past.utils import old_div
import argparse
import time
import sys
logger.info("Already migrated %i images", len(already_migrated))
logger.info("Need to migrate %i images", len(need_migrate))
logger.info("Using tempdir %s", tempfile.gettempdir())
- logger.info("Biggest image is about %i MiB, tempdir needs at least %i MiB free", old_div(biggest,(2**20)), old_div((biggest*2),(2**20)))
+ logger.info("Biggest image is about %i MiB, tempdir needs at least %i MiB free", biggest>>20, biggest>>19)
if args.dry_run:
return
tarfile = list(oldcol.keys())[0]
logger.info("[%i/%i] Migrating %s:%s (%s) (%i MiB)", count, len(need_migrate), old_image["repo"],
- old_image["tag"], old_image["collection"], old_div(list(oldcol.values())[0].size(),(2**20)))
+ old_image["tag"], old_image["collection"], list(oldcol.values())[0].size()>>20)
count += 1
start = time.time()
from __future__ import division
from builtins import str
-from past.utils import old_div
from builtins import object
import argparse
import arvados
if bytes_expected:
return "\r{}M / {}M {:.1%} ".format(
bytes_written >> 20, bytes_expected >> 20,
- old_div(float(bytes_written), bytes_expected))
+ float(bytes_written) / bytes_expected)
else:
return "\r{} ".format(bytes_written)
from builtins import next
from builtins import str
from builtins import range
-from past.utils import old_div
from builtins import object
import io
import datetime
self._result['status_code'],
len(self._result['body']),
t.msecs,
- old_div((old_div(len(self._result['body']),(1024.0*1024))),t.secs) if t.secs > 0 else 0)
+ 1.0*len(self._result['body'])/2**20/t.secs if t.secs > 0 else 0)
if self.download_counter:
self.download_counter.add(len(self._result['body']))
self._result['status_code'],
len(body),
t.msecs,
- old_div((old_div(len(body),(1024.0*1024))),t.secs) if t.secs > 0 else 0)
+ 1.0*len(body)/2**20/t.secs if t.secs > 0 else 0)
if self.upload_counter:
self.upload_counter.add(len(body))
return True
if (not max_service_replicas) or (max_service_replicas >= copies):
num_threads = 1
else:
- num_threads = int(math.ceil(old_div(float(copies), max_service_replicas)))
+ num_threads = int(math.ceil(1.0*copies/max_service_replicas))
_logger.debug("Pool max threads is %d", num_threads)
self.workers = []
self.queue = KeepClient.KeepWriterQueue(copies)
from future import standard_library
standard_library.install_aliases()
from builtins import str
-from past.utils import old_div
import http.server
import hashlib
import os
if self.server.bandwidth == None and self.server.delays['mid_write'] == 0:
self.wfile.write(data_to_write)
else:
- BYTES_PER_WRITE = int(old_div(self.server.bandwidth,4.0)) or 32768
+ BYTES_PER_WRITE = int(self.server.bandwidth/4) or 32768
outage_happened = False
num_bytes = len(data_to_write)
num_sent_bytes = 0
num_sent_bytes:num_sent_bytes+num_write_bytes])
num_sent_bytes += num_write_bytes
if self.server.bandwidth is not None:
- target_time += old_div(num_write_bytes, self.server.bandwidth)
+ target_time += num_write_bytes / self.server.bandwidth
self.server._sleep_at_least(target_time - time.time())
return None
if self.server.bandwidth == None and self.server.delays['mid_read'] == 0:
return self.rfile.read(bytes_to_read)
else:
- BYTES_PER_READ = int(old_div(self.server.bandwidth,4.0)) or 32768
+ BYTES_PER_READ = int(self.server.bandwidth/4) or 32768
data = ''
outage_happened = False
bytes_read = 0
data += self.rfile.read(next_bytes_to_read)
bytes_read += next_bytes_to_read
if self.server.bandwidth is not None:
- target_time += old_div(next_bytes_to_read, self.server.bandwidth)
+ target_time += next_bytes_to_read / self.server.bandwidth
self.server._sleep_at_least(target_time - time.time())
return data
from __future__ import division
from builtins import str
from builtins import range
-from past.utils import old_div
import argparse
import atexit
import errno
# Use up to half of the +wait+ period waiting for "passenger
# stop" to work. If the process hasn't exited by then, start
# sending TERM signals.
- startTERM += old_div(wait,2)
+ startTERM += wait//2
server_pid = None
while now <= deadline and server_pid is None:
def test_version_argument(self):
returncode, out, err = self.run_arv_normalize(['--version'])
+ self.assertEqual(b'', out)
+ self.assertNotEqual(b'', err)
+ self.assertRegexpMatches(err.decode(), r"^bin/arv-normalize [0-9]+\.[0-9]+\.[0-9]+$")
self.assertEqual(0, returncode)
- self.assertEqual('', out)
- self.assertNotEqual('', err)
- self.assertRegexpMatches(err, "[0-9]+\.[0-9]+\.[0-9]+")
standard_library.install_aliases()
from builtins import str
from builtins import range
-from past.utils import old_div
import apiclient
import io
import mock
_, self.large_file_name = tempfile.mkstemp()
fileobj = open(self.large_file_name, 'w')
# Make sure to write just a little more than one block
- for _ in range((old_div(arvados.config.KEEP_BLOCK_SIZE,(1024*1024)))+1):
- data = random.choice(['x', 'y', 'z']) * 1024 * 1024 # 1 MB
+ for _ in range((arvados.config.KEEP_BLOCK_SIZE>>20)+1):
+ data = random.choice(['x', 'y', 'z']) * 1024 * 1024 # 1 MiB
fileobj.write(data)
fileobj.close()
# Temp dir containing small files to be repacked
def test_known_human_progress(self):
for count, total in [(0, 1), (2, 4), (45, 60)]:
- expect = '{:.1%}'.format(old_div(float(count), total))
+ expect = '{:.1%}'.format(1.0*count/total)
actual = arv_put.human_progress(count, total)
self.assertTrue(actual.startswith('\r'))
self.assertIn(expect, actual)
standard_library.install_aliases()
from builtins import range
from builtins import object
-from past.utils import old_div
import arvados
import io
import logging
return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(t))
def localiso(self, t):
- return time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(t)) + self.isotz(old_div(-time.timezone,60))
+ return time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(t)) + self.isotz(-time.timezone//60)
def isotz(self, offset):
"""Convert minutes-east-of-UTC to RFC3339- and ISO-compatible time zone designator"""
- return '{:+03d}:{:02d}'.format(old_div(offset,60), offset%60)
+ return '{:+03d}:{:02d}'.format(offset//60, offset%60)
# Test websocket reconnection on (un)execpted close
def _test_websocket_reconnect(self, close_unexpected):
standard_library.install_aliases()
from builtins import str
from builtins import range
-from past.utils import old_div
from builtins import object
import hashlib
import mock
def test_only_write_enough_on_success(self):
for i in range(10):
- ks = self.FakeKeepService(delay=old_div(i,10.0), will_succeed=True)
+ ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
self.pool.add_task(ks, None)
self.pool.join()
self.assertEqual(self.pool.done(), self.copies)
def test_only_write_enough_on_partial_success(self):
for i in range(5):
- ks = self.FakeKeepService(delay=old_div(i,10.0), will_succeed=False)
+ ks = self.FakeKeepService(delay=i/10.0, will_succeed=False)
self.pool.add_task(ks, None)
- ks = self.FakeKeepService(delay=old_div(i,10.0), will_succeed=True)
+ ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
self.pool.add_task(ks, None)
self.pool.join()
self.assertEqual(self.pool.done(), self.copies)
def test_only_write_enough_when_some_crash(self):
for i in range(5):
- ks = self.FakeKeepService(delay=old_div(i,10.0), will_raise=Exception())
+ ks = self.FakeKeepService(delay=i/10.0, will_raise=Exception())
self.pool.add_task(ks, None)
- ks = self.FakeKeepService(delay=old_div(i,10.0), will_succeed=True)
+ ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
self.pool.add_task(ks, None)
self.pool.join()
self.assertEqual(self.pool.done(), self.copies)
def test_fail_when_too_many_crash(self):
for i in range(self.copies+1):
- ks = self.FakeKeepService(delay=old_div(i,10.0), will_raise=Exception())
+ ks = self.FakeKeepService(delay=i/10.0, will_raise=Exception())
self.pool.add_task(ks, None)
for i in range(self.copies-1):
- ks = self.FakeKeepService(delay=old_div(i,10.0), will_succeed=True)
+ ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
self.pool.add_task(ks, None)
self.pool.join()
self.assertEqual(self.pool.done(), self.copies-1)