11308: Eliminate old_div().
author Tom Clegg <tom@curoverse.com>
Sat, 1 Apr 2017 06:11:08 +0000 (02:11 -0400)
committer Tom Clegg <tom@curoverse.com>
Sat, 1 Apr 2017 21:45:05 +0000 (17:45 -0400)
13 files changed:
sdk/python/arvados/_ranges.py
sdk/python/arvados/arvfile.py
sdk/python/arvados/commands/arv_copy.py
sdk/python/arvados/commands/ls.py
sdk/python/arvados/commands/migrate19.py
sdk/python/arvados/commands/put.py
sdk/python/arvados/keep.py
sdk/python/tests/keepstub.py
sdk/python/tests/run_test_server.py
sdk/python/tests/test_arv_normalize.py
sdk/python/tests/test_arv_put.py
sdk/python/tests/test_events.py
sdk/python/tests/test_keep_client.py

index 5c8b00fc9d7e6a19ef66a04eb32500225651030d..6d7c112b99b8dc8818ef8f3a1988f6200117248c 100644 (file)
@@ -1,5 +1,4 @@
 from __future__ import division
-from past.utils import old_div
 from builtins import object
 import logging
 
@@ -34,7 +33,7 @@ def first_block(data_locators, range_start):
 
     hi = len(data_locators)
     lo = 0
-    i = int(old_div((hi + lo), 2))
+    i = (hi + lo) // 2
     block_size = data_locators[i].range_size
     block_start = data_locators[i].range_start
     block_end = block_start + block_size
@@ -50,7 +49,7 @@ def first_block(data_locators, range_start):
             lo = i
         else:
             hi = i
-        i = int(old_div((hi + lo), 2))
+        i = (hi + lo) // 2
         block_size = data_locators[i].range_size
         block_start = data_locators[i].range_start
         block_end = block_start + block_size
index ab1c64532a130d2880043c518a2c9f9d14b7fc05..c6cb1c91cc9469f8d8f2df6e37296104c886d705 100644 (file)
@@ -3,7 +3,6 @@ from __future__ import division
 from future import standard_library
 standard_library.install_aliases()
 from builtins import range
-from past.utils import old_div
 from builtins import object
 import functools
 import os
@@ -890,7 +889,7 @@ class ArvadosFile(object):
         """
         self._writers.remove(writer)
 
-        if flush or self.size() > old_div(config.KEEP_BLOCK_SIZE, 2):
+        if flush or self.size() > config.KEEP_BLOCK_SIZE // 2:
             # File writer closed, not small enough for repacking
             self.flush()
         elif self.closed():
index c5d74efe550a152a1e1f96e97d440084cbebd63f..ac24224c2001192b9c42e29e10724703ba0ab32c 100755 (executable)
@@ -21,7 +21,6 @@ from future import standard_library
 standard_library.install_aliases()
 from past.builtins import basestring
 from builtins import object
-from past.utils import old_div
 import argparse
 import contextlib
 import getpass
@@ -954,7 +953,7 @@ def human_progress(obj_uuid, bytes_written, bytes_expected):
         return "\r{}: {}M / {}M {:.1%} ".format(
             obj_uuid,
             bytes_written >> 20, bytes_expected >> 20,
-            old_div(float(bytes_written), bytes_expected))
+            float(bytes_written) / bytes_expected)
     else:
         return "\r{}: {} ".format(obj_uuid, bytes_written)
 
index ea93eff9d8c8ec681e94843a13ef38b877d19233..ca2d6d75dccf8f0e3a50f5b871757e721a7d47d2 100755 (executable)
@@ -3,7 +3,6 @@
 from __future__ import print_function
 from __future__ import division
 
-from past.utils import old_div
 import argparse
 import sys
 
@@ -28,7 +27,7 @@ def parse_args(args):
     return parser.parse_args(args)
 
 def size_formatter(coll_file):
-    return "{:>10}".format(old_div((coll_file.size() + 1023), 1024))
+    return "{:>10}".format((coll_file.size() + 1023) // 1024)
 
 def name_formatter(coll_file):
     return "{}/{}".format(coll_file.stream_name(), coll_file.name)
index 349e57b11e11a0268b2be6ca509801f5b6a56aef..724de7b3a72582df46f3746cd62dfa9f43e0bfd1 100644 (file)
@@ -1,6 +1,5 @@
 from __future__ import print_function
 from __future__ import division
-from past.utils import old_div
 import argparse
 import time
 import sys
@@ -136,7 +135,7 @@ def main(arguments=None):
     logger.info("Already migrated %i images", len(already_migrated))
     logger.info("Need to migrate %i images", len(need_migrate))
     logger.info("Using tempdir %s", tempfile.gettempdir())
-    logger.info("Biggest image is about %i MiB, tempdir needs at least %i MiB free", old_div(biggest,(2**20)), old_div((biggest*2),(2**20)))
+    logger.info("Biggest image is about %i MiB, tempdir needs at least %i MiB free", biggest>>20, biggest>>19)
 
     if args.dry_run:
         return
@@ -152,7 +151,7 @@ def main(arguments=None):
         tarfile = list(oldcol.keys())[0]
 
         logger.info("[%i/%i] Migrating %s:%s (%s) (%i MiB)", count, len(need_migrate), old_image["repo"],
-                    old_image["tag"], old_image["collection"], old_div(list(oldcol.values())[0].size(),(2**20)))
+                    old_image["tag"], old_image["collection"], list(oldcol.values())[0].size()>>20)
         count += 1
         start = time.time()
 
index 681e78eed68cd2a926aefd003f9cff88263fb15e..ef86fef679669f370567e0bd3d5eb46cd23082ec 100644 (file)
@@ -5,7 +5,6 @@
 
 from __future__ import division
 from builtins import str
-from past.utils import old_div
 from builtins import object
 import argparse
 import arvados
@@ -821,7 +820,7 @@ def human_progress(bytes_written, bytes_expected):
     if bytes_expected:
         return "\r{}M / {}M {:.1%} ".format(
             bytes_written >> 20, bytes_expected >> 20,
-            old_div(float(bytes_written), bytes_expected))
+            float(bytes_written) / bytes_expected)
     else:
         return "\r{} ".format(bytes_written)
 
index b709473b78c275ca02e1eb2c7015f3e28bf2ffb0..f680d6ce10daeaa546a73a8d1cca6dd906e4bc22 100644 (file)
@@ -5,7 +5,6 @@ standard_library.install_aliases()
 from builtins import next
 from builtins import str
 from builtins import range
-from past.utils import old_div
 from builtins import object
 import io
 import datetime
@@ -392,7 +391,7 @@ class KeepClient(object):
                          self._result['status_code'],
                          len(self._result['body']),
                          t.msecs,
-                         old_div((old_div(len(self._result['body']),(1024.0*1024))),t.secs) if t.secs > 0 else 0)
+                         1.0*len(self._result['body'])/2**20/t.secs if t.secs > 0 else 0)
 
             if self.download_counter:
                 self.download_counter.add(len(self._result['body']))
@@ -465,7 +464,7 @@ class KeepClient(object):
                          self._result['status_code'],
                          len(body),
                          t.msecs,
-                         old_div((old_div(len(body),(1024.0*1024))),t.secs) if t.secs > 0 else 0)
+                         1.0*len(body)/2**20/t.secs if t.secs > 0 else 0)
             if self.upload_counter:
                 self.upload_counter.add(len(body))
             return True
@@ -568,7 +567,7 @@ class KeepClient(object):
             if (not max_service_replicas) or (max_service_replicas >= copies):
                 num_threads = 1
             else:
-                num_threads = int(math.ceil(old_div(float(copies), max_service_replicas)))
+                num_threads = int(math.ceil(1.0*copies/max_service_replicas))
             _logger.debug("Pool max threads is %d", num_threads)
             self.workers = []
             self.queue = KeepClient.KeepWriterQueue(copies)
index 965bf299b86d9bb431e82dec94c46317922a1222..28bd483b40e5d05cc03932507ed04566afded8be 100644 (file)
@@ -2,7 +2,6 @@ from __future__ import division
 from future import standard_library
 standard_library.install_aliases()
 from builtins import str
-from past.utils import old_div
 import http.server
 import hashlib
 import os
@@ -64,7 +63,7 @@ class Handler(http.server.BaseHTTPRequestHandler, object):
         if self.server.bandwidth == None and self.server.delays['mid_write'] == 0:
             self.wfile.write(data_to_write)
         else:
-            BYTES_PER_WRITE = int(old_div(self.server.bandwidth,4.0)) or 32768
+            BYTES_PER_WRITE = int(self.server.bandwidth/4) or 32768
             outage_happened = False
             num_bytes = len(data_to_write)
             num_sent_bytes = 0
@@ -80,7 +79,7 @@ class Handler(http.server.BaseHTTPRequestHandler, object):
                     num_sent_bytes:num_sent_bytes+num_write_bytes])
                 num_sent_bytes += num_write_bytes
                 if self.server.bandwidth is not None:
-                    target_time += old_div(num_write_bytes, self.server.bandwidth)
+                    target_time += num_write_bytes / self.server.bandwidth
                     self.server._sleep_at_least(target_time - time.time())
         return None
 
@@ -88,7 +87,7 @@ class Handler(http.server.BaseHTTPRequestHandler, object):
         if self.server.bandwidth == None and self.server.delays['mid_read'] == 0:
             return self.rfile.read(bytes_to_read)
         else:
-            BYTES_PER_READ = int(old_div(self.server.bandwidth,4.0)) or 32768
+            BYTES_PER_READ = int(self.server.bandwidth/4) or 32768
             data = ''
             outage_happened = False
             bytes_read = 0
@@ -103,7 +102,7 @@ class Handler(http.server.BaseHTTPRequestHandler, object):
                 data += self.rfile.read(next_bytes_to_read)
                 bytes_read += next_bytes_to_read
                 if self.server.bandwidth is not None:
-                    target_time += old_div(next_bytes_to_read, self.server.bandwidth)
+                    target_time += next_bytes_to_read / self.server.bandwidth
                     self.server._sleep_at_least(target_time - time.time())
         return data
 
index d96612631885f9d1bf4f2321f457787fb3daa112..7dcd64703b8d5bc058fe22ca068af6ee74fa3750 100644 (file)
@@ -4,7 +4,6 @@ from __future__ import print_function
 from __future__ import division
 from builtins import str
 from builtins import range
-from past.utils import old_div
 import argparse
 import atexit
 import errno
@@ -100,7 +99,7 @@ def kill_server_pid(pidfile, wait=10, passenger_root=False):
         # Use up to half of the +wait+ period waiting for "passenger
         # stop" to work. If the process hasn't exited by then, start
         # sending TERM signals.
-        startTERM += old_div(wait,2)
+        startTERM += wait//2
 
     server_pid = None
     while now <= deadline and server_pid is None:
index 8bce7e3860ad3dc90864806ae513197e24249e1a..426d41a807dd542c38402850c27478d431520e0f 100644 (file)
@@ -21,7 +21,7 @@ class ArvNormalizeTestCase(unittest.TestCase):
 
     def test_version_argument(self):
         returncode, out, err = self.run_arv_normalize(['--version'])
+        self.assertEqual(b'', out)
+        self.assertNotEqual(b'', err)
+        self.assertRegexpMatches(err.decode(), "^bin/arv-normalize [0-9]+\.[0-9]+\.[0-9]+$")
         self.assertEqual(0, returncode)
-        self.assertEqual('', out)
-        self.assertNotEqual('', err)
-        self.assertRegexpMatches(err, "[0-9]+\.[0-9]+\.[0-9]+")
index 5abf38854abc900146723fd27a549823eaff5882..667abbe63ef81971c0152d3a37576df32ca6d77f 100644 (file)
@@ -7,7 +7,6 @@ from future import standard_library
 standard_library.install_aliases()
 from builtins import str
 from builtins import range
-from past.utils import old_div
 import apiclient
 import io
 import mock
@@ -264,8 +263,8 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
         _, self.large_file_name = tempfile.mkstemp()
         fileobj = open(self.large_file_name, 'w')
         # Make sure to write just a little more than one block
-        for _ in range((old_div(arvados.config.KEEP_BLOCK_SIZE,(1024*1024)))+1):
-            data = random.choice(['x', 'y', 'z']) * 1024 * 1024 # 1 MB
+        for _ in range((arvados.config.KEEP_BLOCK_SIZE>>20)+1):
+            data = random.choice(['x', 'y', 'z']) * 1024 * 1024 # 1 MiB
             fileobj.write(data)
         fileobj.close()
         # Temp dir containing small files to be repacked
@@ -531,7 +530,7 @@ class ArvadosPutReportTest(ArvadosBaseTestCase):
 
     def test_known_human_progress(self):
         for count, total in [(0, 1), (2, 4), (45, 60)]:
-            expect = '{:.1%}'.format(old_div(float(count), total))
+            expect = '{:.1%}'.format(1.0*count/total)
             actual = arv_put.human_progress(count, total)
             self.assertTrue(actual.startswith('\r'))
             self.assertIn(expect, actual)
index 2cca77339d1b8be66420db60f7605ec436beef33..4596b6cdfbd6499b6bfc69aa4cbfd44da19300b6 100644 (file)
@@ -5,7 +5,6 @@ from future import standard_library
 standard_library.install_aliases()
 from builtins import range
 from builtins import object
-from past.utils import old_div
 import arvados
 import io
 import logging
@@ -151,11 +150,11 @@ class WebsocketTest(run_test_server.TestCaseWithServers):
         return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(t))
 
     def localiso(self, t):
-        return time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(t)) + self.isotz(old_div(-time.timezone,60))
+        return time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(t)) + self.isotz(-time.timezone//60)
 
     def isotz(self, offset):
         """Convert minutes-east-of-UTC to RFC3339- and ISO-compatible time zone designator"""
-        return '{:+03d}:{:02d}'.format(old_div(offset,60), offset%60)
+        return '{:+03d}:{:02d}'.format(offset//60, offset%60)
 
     # Test websocket reconnection on (un)execpted close
     def _test_websocket_reconnect(self, close_unexpected):
index b69563f94238aee5b9e53713e7ab5df7b59ba582..8c2a67130ad491f7ab7b0c124f566f00fef3453e 100644 (file)
@@ -4,7 +4,6 @@ from future import standard_library
 standard_library.install_aliases()
 from builtins import str
 from builtins import range
-from past.utils import old_div
 from builtins import object
 import hashlib
 import mock
@@ -1125,35 +1124,35 @@ class AvoidOverreplication(unittest.TestCase, tutil.ApiClientMock):
 
     def test_only_write_enough_on_success(self):
         for i in range(10):
-            ks = self.FakeKeepService(delay=old_div(i,10.0), will_succeed=True)
+            ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
             self.pool.add_task(ks, None)
         self.pool.join()
         self.assertEqual(self.pool.done(), self.copies)
 
     def test_only_write_enough_on_partial_success(self):
         for i in range(5):
-            ks = self.FakeKeepService(delay=old_div(i,10.0), will_succeed=False)
+            ks = self.FakeKeepService(delay=i/10.0, will_succeed=False)
             self.pool.add_task(ks, None)
-            ks = self.FakeKeepService(delay=old_div(i,10.0), will_succeed=True)
+            ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
             self.pool.add_task(ks, None)
         self.pool.join()
         self.assertEqual(self.pool.done(), self.copies)
 
     def test_only_write_enough_when_some_crash(self):
         for i in range(5):
-            ks = self.FakeKeepService(delay=old_div(i,10.0), will_raise=Exception())
+            ks = self.FakeKeepService(delay=i/10.0, will_raise=Exception())
             self.pool.add_task(ks, None)
-            ks = self.FakeKeepService(delay=old_div(i,10.0), will_succeed=True)
+            ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
             self.pool.add_task(ks, None)
         self.pool.join()
         self.assertEqual(self.pool.done(), self.copies)
 
     def test_fail_when_too_many_crash(self):
         for i in range(self.copies+1):
-            ks = self.FakeKeepService(delay=old_div(i,10.0), will_raise=Exception())
+            ks = self.FakeKeepService(delay=i/10.0, will_raise=Exception())
             self.pool.add_task(ks, None)
         for i in range(self.copies-1):
-            ks = self.FakeKeepService(delay=old_div(i,10.0), will_succeed=True)
+            ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)
             self.pool.add_task(ks, None)
         self.pool.join()
         self.assertEqual(self.pool.done(), self.copies-1)