services/crunchstat
services/dockercleaner
services/fuse
+services/fuse:py3
services/health
services/keep-web
services/keepproxy
echo -n 'Python3 pyconfig.h: '
find /usr/include -path '*/python3*/pyconfig.h' | egrep --max-count=1 . \
|| fatal "No Python3 pyconfig.h. Try: apt-get install python3-dev"
+ which netstat \
+ || fatal "No netstat. Try: apt-get install net-tools"
echo -n 'nginx: '
PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin" nginx -v \
|| fatal "No nginx. Try: apt-get install nginx"
tries=$((${tries}+1))
# $3 can name a path directory for us to use, including trailing
# slash; e.g., the bin/ subdirectory of a virtualenv.
- "${3}python" setup.py ${short:+--short-tests-only} test ${testargs[$1]}
+ if [[ -e "${3}activate" ]]; then
+ . "${3}activate"
+ fi
+ python setup.py ${short:+--short-tests-only} test ${testargs[$1]}
result=$?
if [[ ${tries} < 3 && ${result} == 137 ]]
then
sdk/cwl:py3
services/dockercleaner:py3
services/fuse
+ services/fuse:py3
services/nodemanager
tools/crunchstat-summary
tools/crunchstat-summary:py3
in seconds), print a warning on stderr before returning.
"""
try:
- subprocess.check_output(['which', 'lsof'])
+ subprocess.check_output(['which', 'netstat'])
except subprocess.CalledProcessError:
- print("WARNING: No `lsof` -- cannot wait for port to listen. "+
+ print("WARNING: No `netstat` -- cannot wait for port to listen. "+
"Sleeping 0.5 and hoping for the best.",
file=sys.stderr)
time.sleep(0.5)
return
deadline = time.time() + timeout
while time.time() < deadline:
- try:
- subprocess.check_output(
- ['lsof', '-t', '-i', 'tcp:'+str(port)])
- except subprocess.CalledProcessError:
- time.sleep(0.1)
- continue
- return True
+ if re.search(r'\ntcp.*:'+str(port)+' .* LISTEN *\n', subprocess.check_output(['netstat', '-Wln']).decode()):
+ return True
+ time.sleep(0.1)
if warn:
print(
"WARNING: Nothing is listening on port {} (waited {} seconds).".
for arg, val in keep_args.items():
keep_cmd.append("{}={}".format(arg, val))
- logf = open(_logfilename('keep{}'.format(n)), 'a')
- kp0 = subprocess.Popen(
- keep_cmd, stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+ with open(_logfilename('keep{}'.format(n)), 'a') as logf:
+ with open('/dev/null') as _stdin:
+ kp0 = subprocess.Popen(
+ keep_cmd, stdin=_stdin, stdout=logf, stderr=logf, close_fds=True)
with open(_pidfile('keep{}'.format(n)), 'w') as f:
f.write(str(kp0.pid))
pidfile = _pidfile('keepproxy')
if os.path.exists(pidfile):
try:
- os.kill(int(open(pidfile).read()), signal.SIGHUP)
+ with open(pidfile) as pid:
+ os.kill(int(pid.read()), signal.SIGHUP)
except OSError:
os.remove(pidfile)
# Returns 9 if program is not up.
def _getport(program):
try:
- return int(open(_portfile(program)).read())
+ with open(_portfile(program)) as prog:
+ return int(prog.read())
except IOError:
return 9
"""
+from __future__ import absolute_import
+from __future__ import division
+from future.utils import viewitems
+from future.utils import native
+from future.utils import listvalues
+from future import standard_library
+standard_library.install_aliases()
+from builtins import next
+from builtins import str
+from builtins import object
import os
import sys
import llfuse
import functools
import arvados.keep
from prometheus_client import Summary
-
-import Queue
+import queue
# Default _notify_queue has a limit of 1000 items, but it really needs to be
# unlimited to avoid deadlocks, see https://arvados.org/issues/3198#note-43 for
if hasattr(llfuse, 'capi'):
# llfuse < 0.42
- llfuse.capi._notify_queue = Queue.Queue()
+ llfuse.capi._notify_queue = queue.Queue()
else:
# llfuse >= 0.42
- llfuse._notify_queue = Queue.Queue()
+ llfuse._notify_queue = queue.Queue()
LLFUSE_VERSION_0 = llfuse.__version__.startswith('0')
-from fusedir import sanitize_filename, Directory, CollectionDirectory, TmpCollectionDirectory, MagicDirectory, TagsDirectory, ProjectDirectory, SharedDirectory, CollectionDirectoryBase
-from fusefile import StringFile, FuseArvadosFile
+from .fusedir import sanitize_filename, Directory, CollectionDirectory, TmpCollectionDirectory, MagicDirectory, TagsDirectory, ProjectDirectory, SharedDirectory, CollectionDirectoryBase
+from .fusefile import StringFile, FuseArvadosFile
_logger = logging.getLogger('arvados.arvados_fuse')
def cap_cache(self):
if self._total > self.cap:
- for ent in self._entries.values():
+ for ent in listvalues(self._entries):
if self._total < self.cap or len(self._entries) < self.min_entries:
break
self._remove(ent, True)
self._entries[key] = item
def __iter__(self):
- return self._entries.iterkeys()
+ return iter(self._entries.keys())
def items(self):
return self._entries.items()
if entry.has_ref(False):
# Only necessary if the kernel has previously done a lookup on this
# inode and hasn't yet forgotten about it.
- llfuse.invalidate_entry(entry.inode, name.encode(self.encoding))
+ llfuse.invalidate_entry(entry.inode, native(name.encode(self.encoding)))
def clear(self):
self.inode_cache.clear()
- for k,v in self._entries.items():
+ for k,v in viewitems(self._entries):
try:
v.finalize()
except Exception as e:
self.initlock.set()
def metric_samples(self):
- return self.fuse_time.collect()[0].samples
+ return self.fuse_time.collect()[0].samples
def metric_op_names(self):
ops = []
entry.st_size = e.size()
entry.st_blksize = 512
- entry.st_blocks = (entry.st_size/512)+1
+ entry.st_blocks = (entry.st_size // 512) + 1
if hasattr(entry, 'st_atime_ns'):
# llfuse >= 0.42
entry.st_atime_ns = int(e.atime() * 1000000000)
@lookup_time.time()
@catch_exceptions
def lookup(self, parent_inode, name, ctx=None):
- name = unicode(name, self.inodes.encoding)
+ name = str(name, self.inodes.encoding)
inode = None
if name == '.':
@create_time.time()
@catch_exceptions
def create(self, inode_parent, name, mode, flags, ctx=None):
+ name = name.decode()
_logger.debug("arv-mount create: parent_inode %i '%s' %o", inode_parent, name, mode)
p = self._check_writable(inode_parent)
@mkdir_time.time()
@catch_exceptions
def mkdir(self, inode_parent, name, mode, ctx=None):
+ name = name.decode()
_logger.debug("arv-mount mkdir: parent_inode %i '%s' %o", inode_parent, name, mode)
p = self._check_writable(inode_parent)
def unlink(self, inode_parent, name, ctx=None):
_logger.debug("arv-mount unlink: parent_inode %i '%s'", inode_parent, name)
p = self._check_writable(inode_parent)
- p.unlink(name)
+ p.unlink(name.decode())
@rmdir_time.time()
@catch_exceptions
def rmdir(self, inode_parent, name, ctx=None):
_logger.debug("arv-mount rmdir: parent_inode %i '%s'", inode_parent, name)
p = self._check_writable(inode_parent)
- p.rmdir(name)
+ p.rmdir(name.decode())
@rename_time.time()
@catch_exceptions
_logger.debug("arv-mount rename: old_parent_inode %i '%s' new_parent_inode %i '%s'", inode_parent_old, name_old, inode_parent_new, name_new)
src = self._check_writable(inode_parent_old)
dest = self._check_writable(inode_parent_new)
- dest.rename(name_old, name_new, src)
+ dest.rename(name_old.decode(), name_new.decode(), src)
@flush_time.time()
@catch_exceptions
#
# SPDX-License-Identifier: AGPL-3.0
+from future.utils import native_str
+from builtins import range
+from builtins import object
import argparse
import arvados
import daemon
with "--".
""")
self.add_argument('--version', action='version',
- version="%s %s" % (sys.argv[0], __version__),
+ version=u"%s %s" % (sys.argv[0], __version__),
help='Print version and exit.')
self.add_argument('mountpoint', type=str, help="""Mount point.""")
self.add_argument('--allow-other', action='store_true',
if self.args.replace:
unmount(path=self.args.mountpoint,
timeout=self.args.unmount_timeout)
- llfuse.init(self.operations, self.args.mountpoint, self._fuse_options())
+ llfuse.init(self.operations, native_str(self.args.mountpoint), self._fuse_options())
if self.daemon:
daemon.DaemonContext(
working_directory=os.path.dirname(self.args.mountpoint),
- files_preserve=range(
- 3, resource.getrlimit(resource.RLIMIT_NOFILE)[1])
+ files_preserve=list(range(
+ 3, resource.getrlimit(resource.RLIMIT_NOFILE)[1]))
).open()
if self.listen_for_events and not self.args.disable_event_listening:
self.operations.listen_for_events()
#
# SPDX-License-Identifier: AGPL-3.0
+from builtins import str
+from builtins import object
import sys
import time
from collections import namedtuple
Stat = namedtuple("Stat", ['name', 'get'])
-class StatWriter(object):
+class StatWriter(object):
def __init__(self, prefix, interval, stats):
self.prefix = prefix
self.interval = interval
def statlogger(interval, keep, ops):
calls = StatWriter("keepcalls", interval, [
- Stat("put", keep.put_counter.get),
+ Stat("put", keep.put_counter.get),
Stat("get", keep.get_counter.get)
])
net = StatWriter("net:keep0", interval, [
Stat("rx", keep.download_counter.get)
])
cache = StatWriter("keepcache", interval, [
- Stat("hit", keep.hits_counter.get),
+ Stat("hit", keep.hits_counter.get),
Stat("miss", keep.misses_counter.get)
])
fuseops = StatWriter("fuseops", interval, [
- Stat("write", ops.write_ops_counter.get),
+ Stat("write", ops.write_ops_counter.get),
Stat("read", ops.read_ops_counter.get)
])
fusetimes = []
- for cur_op in ops.metric_op_names():
+ for cur_op in ops.metric_op_names():
name = "fuseop:{0}".format(cur_op)
fusetimes.append(StatWriter(name, interval, [
Stat("count", ops.metric_count_func(cur_op)),
#
# SPDX-License-Identifier: AGPL-3.0
+from builtins import object
import time
import ciso8601
import calendar
#
# SPDX-License-Identifier: AGPL-3.0
+from __future__ import absolute_import
+from __future__ import division
+from future.utils import viewitems
+from builtins import dict
import logging
import re
import time
import errno
import time
-from fusefile import StringFile, ObjectFile, FuncToJSONFile, FuseArvadosFile
-from fresh import FreshBase, convertTime, use_counter, check_update
+from .fusefile import StringFile, ObjectFile, FuncToJSONFile, FuseArvadosFile
+from .fresh import FreshBase, convertTime, use_counter, check_update
import arvados.collection
from arvados.util import portable_data_hash_pattern, uuid_pattern, collection_uuid_pattern, group_uuid_pattern, user_uuid_pattern, link_uuid_pattern
def in_use(self):
if super(Directory, self).in_use():
return True
- for v in self._entries.itervalues():
+ for v in self._entries.values():
if v.in_use():
return True
return False
def has_ref(self, only_children):
if super(Directory, self).has_ref(only_children):
return True
- for v in self._entries.itervalues():
+ for v in self._entries.values():
if v.has_ref(False):
return True
return False
# Find self on the parent in order to invalidate this path.
# Calling the public items() method might trigger a refresh,
# which we definitely don't want, so read the internal dict directly.
- for k,v in parent._entries.items():
+ for k,v in viewitems(parent._entries):
if v is self:
self.inodes.invalidate_entry(parent, k)
break
def populate(self, mtime):
self._mtime = mtime
self.collection.subscribe(self.on_event)
- for entry, item in self.collection.items():
+ for entry, item in viewitems(self.collection):
self.new_entry(entry, item, self.mtime())
def writable(self):
self.collection_record = None
self._poll = True
try:
- self._poll_time = (api._rootDesc.get('blobSignatureTtl', 60*60*2)/2)
+ self._poll_time = (api._rootDesc.get('blobSignatureTtl', 60*60*2) // 2)
except:
_logger.debug("Error getting blobSignatureTtl from discovery document: %s", sys.exc_info()[0])
self._poll_time = 60*60
# end with llfuse.lock_released, re-acquire lock
- self.merge(contents.items(),
+ self.merge(viewitems(contents),
lambda i: i[0],
lambda a, i: a.uuid() == i[1]['uuid'],
lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i[1], poll=self._poll, poll_time=self._poll_time))
#
# SPDX-License-Identifier: AGPL-3.0
+from __future__ import absolute_import
+from builtins import bytes
import json
import llfuse
import logging
import re
import time
-from fresh import FreshBase, convertTime
+from .fresh import FreshBase, convertTime
_logger = logging.getLogger('arvados.arvados_fuse')
return len(self.contents)
def readfrom(self, off, size, num_retries=0):
- return self.contents[off:(off+size)]
+ return bytes(self.contents[off:(off+size)], encoding='utf-8')
class ObjectFile(StringFile):
],
install_requires=[
'arvados-python-client{}'.format(pysdk_dep),
- # llfuse 1.3.4 fails to install via pip
- 'llfuse >=1.2, <1.3.4',
+ 'llfuse >= 1.3.6',
+ 'future',
'python-daemon',
'ciso8601 >= 2.0.0',
'setuptools',
extras_require={
':python_version<"3"': ['pytz'],
},
+ classifiers=[
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 3',
+ ],
test_suite='tests',
tests_require=['pbr<1.7.0', 'mock>=1.0', 'PyYAML'],
zip_safe=False
#
# SPDX-License-Identifier: AGPL-3.0
+from __future__ import print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
from multiprocessing import Process
import os
import subprocess
import sys
-import prof
+from . import prof
def fn(n):
return "file%i" % n
def createfiles(d, n):
- for j in xrange(1, 5):
- print "Starting small file %s %i, %i" % (d, n, j)
+ for j in range(1, 5):
+ print("Starting small file %s %i, %i" % (d, n, j))
if d:
os.mkdir(d)
ld = os.listdir('.')
if d not in ld:
- print "ERROR %s missing" % d
+ print("ERROR %s missing" % d)
os.chdir(d)
- for i in xrange(n, n+10):
+ for i in range(n, n+10):
with open(fn(i), "w") as f:
f.write(fn(i))
ld = os.listdir('.')
- for i in xrange(n, n+10):
+ for i in range(n, n+10):
if fn(i) not in ld:
- print "ERROR %s missing" % fn(i)
+ print("ERROR %s missing" % fn(i))
- for i in xrange(n, n+10):
+ for i in range(n, n+10):
with open(fn(i), "r") as f:
if f.read() != fn(i):
- print "ERROR %s doesn't have expected contents" % fn(i)
+ print("ERROR %s doesn't have expected contents" % fn(i))
- for i in xrange(n, n+10):
+ for i in range(n, n+10):
os.remove(fn(i))
ld = os.listdir('.')
- for i in xrange(n, n+10):
+ for i in range(n, n+10):
if fn(i) in ld:
- print "ERROR %s should have been removed" % fn(i)
+ print("ERROR %s should have been removed" % fn(i))
if d:
os.chdir('..')
os.rmdir(d)
ld = os.listdir('.')
if d in ld:
- print "ERROR %s should have been removed" % d
+ print("ERROR %s should have been removed" % d)
def createbigfile(d, n):
- for j in xrange(1, 5):
- print "Starting big file %s %i, %i" % (d, n, j)
+ for j in range(1, 5):
+ print("Starting big file %s %i, %i" % (d, n, j))
i = n
if d:
os.mkdir(d)
ld = os.listdir('.')
if d not in ld:
- print "ERROR %s missing" % d
+ print("ERROR %s missing" % d)
os.chdir(d)
with open(fn(i), "w") as f:
- for j in xrange(0, 1000):
+ for j in range(0, 1000):
f.write((str(j) + fn(i)) * 10000)
ld = os.listdir('.')
if fn(i) not in ld:
- print "ERROR %s missing" % fn(i)
+ print("ERROR %s missing" % fn(i))
with open(fn(i), "r") as f:
- for j in xrange(0, 1000):
+ for j in range(0, 1000):
expect = (str(j) + fn(i)) * 10000
if f.read(len(expect)) != expect:
- print "ERROR %s doesn't have expected contents" % fn(i)
+ print("ERROR %s doesn't have expected contents" % fn(i))
os.remove(fn(i))
ld = os.listdir('.')
if fn(i) in ld:
- print "ERROR %s should have been removed" % fn(i)
+ print("ERROR %s should have been removed" % fn(i))
if d:
os.chdir('..')
os.rmdir(d)
ld = os.listdir('.')
if d in ld:
- print "ERROR %s should have been removed" % d
+ print("ERROR %s should have been removed" % d)
def do_ls():
with open("/dev/null", "w") as nul:
- for j in xrange(1, 50):
+ for j in range(1, 50):
subprocess.call(["ls", "-l"], stdout=nul, stderr=nul)
def runit(target, indir):
procs = []
- for n in xrange(0, 20):
+ for n in range(0, 20):
if indir:
p = Process(target=target, args=("dir%i" % n, n*10,))
else:
p.join()
if os.listdir('.'):
- print "ERROR there are left over files in the directory"
+ print("ERROR there are left over files in the directory")
if __name__ == '__main__':
if os.listdir('.'):
- print "ERROR starting directory is not empty"
+ print("ERROR starting directory is not empty")
sys.exit()
- print "Single directory small files"
+ print("Single directory small files")
with prof.CountTime():
runit(createfiles, False)
- print "Separate directories small files"
+ print("Separate directories small files")
with prof.CountTime():
runit(createfiles, True)
- print "Single directory large files"
+ print("Single directory large files")
with prof.CountTime():
runit(createbigfile, False)
- print "Separate directories large files"
+ print("Separate directories large files")
with prof.CountTime():
runit(createbigfile, True)
#
# SPDX-License-Identifier: AGPL-3.0
+from __future__ import absolute_import
import arvados
import arvados_fuse
import arvados_fuse.command
import logging
import multiprocessing
import os
-import run_test_server
+from . import run_test_server
import signal
import sys
import tempfile
#
# SPDX-License-Identifier: AGPL-3.0
+from __future__ import absolute_import
import arvados
import arvados_fuse as fuse
import arvados.safeapi
import logging
import multiprocessing
import os
-import run_test_server
+from . import run_test_server
import shutil
import signal
import subprocess
path = self.mounttmp
if subdir:
path = os.path.join(path, subdir)
- self.assertEqual(sorted(expect_content), sorted(llfuse.listdir(path)))
+ self.assertEqual(sorted(expect_content), sorted(llfuse.listdir(str(path))))
#
# SPDX-License-Identifier: AGPL-3.0
+from __future__ import absolute_import
+from future.utils import viewitems
+from builtins import str
+from builtins import range
import arvados
import arvados_fuse as fuse
import llfuse
logger = logging.getLogger('arvados.arv-mount')
-from performance_profiler import profiled
+from .performance_profiler import profiled
def fuse_createCollectionWithMultipleBlocks(mounttmp, streams=1, files_per_stream=1, data='x'):
class Test(unittest.TestCase):
for j in range(0, files_per_stream):
files[os.path.join(self.mounttmp, collection, 'file'+str(j)+'.txt')] = data
- for k, v in files.items():
+ for k, v in viewitems(files):
with open(os.path.join(self.mounttmp, collection, k)) as f:
self.assertEqual(v, f.read())
#
# SPDX-License-Identifier: AGPL-3.0
+from __future__ import print_function
+from builtins import object
import time
class CountTime(object):
sec = (time.time() - self.start)
th = ""
if self.size:
- th = "throughput %s/sec" % (self.size / sec)
- print "%s time %s micoseconds %s" % (self.tag, sec*1000000, th)
+ th = "throughput %s/sec" % (self.size // sec)
+ print("%s time %s microseconds %s" % (self.tag, sec*1000000, th))
#
# SPDX-License-Identifier: AGPL-3.0
+from builtins import range
import arvados
import arvados.collection
import arvados_fuse
#
# SPDX-License-Identifier: AGPL-3.0
+from __future__ import absolute_import
+from __future__ import print_function
import arvados
import arvados_fuse
import arvados_fuse.command
import logging
import mock
import os
-import run_test_server
+from . import run_test_server
import sys
import tempfile
import unittest
def noexit(func):
"""If argparse or arvados_fuse tries to exit, fail the test instead"""
- class SystemExitCaught(StandardError):
+ class SystemExitCaught(Exception):
pass
@functools.wraps(func)
def wrapper(*args, **kwargs):
@contextlib.contextmanager
def nostderr():
- orig, sys.stderr = sys.stderr, open(os.devnull, 'w')
- try:
- yield
- finally:
- sys.stderr = orig
+ with open(os.devnull, 'w') as dn:
+ orig, sys.stderr = sys.stderr, dn
+ try:
+ yield
+ finally:
+ sys.stderr = orig
class MountArgsTest(unittest.TestCase):
e = self.check_ent_type(arvados_fuse.MagicDirectory, 'by_id')
e = self.check_ent_type(arvados_fuse.StringFile, 'README')
- readme = e.readfrom(0, -1)
+ readme = e.readfrom(0, -1).decode()
self.assertRegexpMatches(readme, r'active-user@arvados\.local')
self.assertRegexpMatches(readme, r'\n$')
e = self.check_ent_type(arvados_fuse.StringFile, 'by_id', 'README')
- txt = e.readfrom(0, -1)
+ txt = e.readfrom(0, -1).decode()
self.assertRegexpMatches(txt, r'portable data hash')
self.assertRegexpMatches(txt, r'\n$')
self.assertEqual(True, self.mnt.listen_for_events)
def test_version_argument(self):
- orig, sys.stderr = sys.stderr, io.BytesIO()
+ # The argparse version action prints to stderr in Python 2 and stdout
+ # in Python 3.4 and up. Write both to the same stream so the test can pass
+ # in both cases.
+ origerr = sys.stderr
+ origout = sys.stdout
+ sys.stderr = io.StringIO()
+ sys.stdout = sys.stderr
+
with self.assertRaises(SystemExit):
args = arvados_fuse.command.ArgumentParser().parse_args(['--version'])
- self.assertRegexpMatches(sys.stderr.getvalue(), "[0-9]+\.[0-9]+\.[0-9]+")
- sys.stderr = orig
+ self.assertRegexpMatches(sys.stdout.getvalue(), "[0-9]+\.[0-9]+\.[0-9]+")
+ sys.stderr.close()
+ sys.stderr = origerr
+ sys.stdout = origout
@noexit
@mock.patch('arvados.events.subscribe')
#
# SPDX-License-Identifier: AGPL-3.0
+from __future__ import absolute_import
import subprocess
-from integration_test import IntegrationTest
+from .integration_test import IntegrationTest
class CrunchstatTest(IntegrationTest):
'--crunchstat-interval', '1',
self.mnt,
'--exec', 'echo', 'ok'])
- self.assertEqual("ok\n", output)
+ self.assertEqual(b"ok\n", output)
#
# SPDX-License-Identifier: AGPL-3.0
+from __future__ import absolute_import
import arvados_fuse.command
import json
import multiprocessing
import os
-import run_test_server
+from . import run_test_server
import tempfile
import unittest
quote(os.path.join(self.mnt, 'zzz', 'foo.txt')),
quote(os.path.join(self.mnt, 'zzz', '.arvados#collection')),
quote(os.path.join(self.okfile)))]))
- self.assertRegexpMatches(
- json.load(open(self.okfile))['manifest_text'],
- r' 0:3:foo.txt\n')
+ with open(self.okfile) as f:
+ self.assertRegexpMatches(
+ json.load(f)['manifest_text'],
+ r' 0:3:foo.txt\n')
#
# SPDX-License-Identifier: AGPL-3.0
+from __future__ import absolute_import
+from future.utils import viewitems
+from builtins import str
+from builtins import object
import json
import llfuse
import logging
import arvados
import arvados_fuse as fuse
-import run_test_server
+from . import run_test_server
-from mount_test_base import MountTestBase
+from .mount_test_base import MountTestBase
logger = logging.getLogger('arvados.arv-mount')
self.done = False
return self
- def next(self):
+ def __next__(self):
if self.done:
raise StopIteration
return self.attempt
'dir2/dir3/thing7.txt': 'data 7',
'dir2/dir3/thing8.txt': 'data 8'}
- for k, v in files.items():
- with open(os.path.join(self.mounttmp, k)) as f:
- self.assertEqual(v, f.read())
+ for k, v in viewitems(files):
+ with open(os.path.join(self.mounttmp, k), 'rb') as f:
+ self.assertEqual(v, f.read().decode())
class FuseMagicTest(MountTestBase):
files = {}
files[os.path.join(self.mounttmp, self.testcollection, 'thing1.txt')] = 'data 1'
- for k, v in files.items():
- with open(os.path.join(self.mounttmp, k)) as f:
- self.assertEqual(v, f.read())
+ for k, v in viewitems(files):
+ with open(os.path.join(self.mounttmp, k), 'rb') as f:
+ self.assertEqual(v, f.read().decode())
class FuseTagsTest(MountTestBase):
# check mtime on collection
st = os.stat(baz_path)
try:
- mtime = st.st_mtime_ns / 1000000000
+ mtime = st.st_mtime_ns // 1000000000
except AttributeError:
mtime = st.st_mtime
self.assertEqual(mtime, 1391448174)
self.make_mount(fuse.SharedDirectory,
exclude=self.api.users().current().execute()['uuid'])
keep = arvados.keep.KeepClient()
- keep.put("baz")
+ keep.put("baz".encode())
self.pool.apply(fuseSharedTestHelper, (self.mounttmp,))
'anonymously_accessible_project']
found_in = 0
found_not_in = 0
- for name, item in run_test_server.fixture('collections').iteritems():
+ for name, item in viewitems(run_test_server.fixture('collections')):
if 'name' not in item:
pass
elif item['owner_uuid'] == public_project['uuid']:
api.keep.get.side_effect = Exception('Keep fail')
def runTest(self):
- self.make_mount(fuse.MagicDirectory)
+ with mock.patch('arvados_fuse.fresh.FreshBase._poll_time', new_callable=mock.PropertyMock, return_value=60) as mock_poll_time:
+ self.make_mount(fuse.MagicDirectory)
- self.operations.inodes.inode_cache.cap = 1
- self.operations.inodes.inode_cache.min_entries = 2
+ self.operations.inodes.inode_cache.cap = 1
+ self.operations.inodes.inode_cache.min_entries = 2
- with self.assertRaises(OSError):
- llfuse.listdir(os.path.join(self.mounttmp, self.testcollection))
+ with self.assertRaises(OSError):
+ llfuse.listdir(os.path.join(self.mounttmp, self.testcollection))
- llfuse.listdir(os.path.join(self.mounttmp, self.testcollection))
+ llfuse.listdir(os.path.join(self.mounttmp, self.testcollection))
class FuseUnitTest(unittest.TestCase):
files = {}
files[os.path.join(self.mounttmp, self.testcollection, 'thing1.txt')] = 'data 1'
- for k, v in files.items():
- with open(os.path.join(self.mounttmp, k)) as f:
- self.assertEqual(v, f.read())
+ for k, v in viewitems(files):
+ with open(os.path.join(self.mounttmp, k), 'rb') as f:
+ self.assertEqual(v, f.read().decode())
# look up using uuid should fail when pdh_only is set
if pdh_only is True:
toks[4]
for toks in [
line.split(' ')
- for line in subprocess.check_output("mount").split("\n")
+ for line in subprocess.check_output("mount").decode().split("\n")
]
if len(toks) > 4 and toks[2] == mnt
])
#
# SPDX-License-Identifier: AGPL-3.0
+from __future__ import absolute_import
+from future import standard_library
+standard_library.install_aliases()
import arvados
import arvados_fuse.command
import json
import mock
import os
import pycurl
-import Queue
-import run_test_server
+import queue
+from . import run_test_server
import tempfile
import unittest
def test_retry_write(self, sleep):
mockedCurl = mock.Mock(spec=pycurl.Curl(), wraps=pycurl.Curl())
mockedCurl.perform.side_effect = Exception('mock error (ok)')
- q = Queue.Queue()
+ q = queue.Queue()
q.put(mockedCurl)
q.put(pycurl.Curl())
q.put(pycurl.Curl())
#
# SPDX-License-Identifier: AGPL-3.0
+from builtins import range
import arvados
import arvados_fuse
import arvados_fuse.command
def current_manifest(tmpdir):
- return json.load(open(
- os.path.join(tmpdir, '.arvados#collection')
- ))['manifest_text']
+ with open(os.path.join(tmpdir, '.arvados#collection')) as tmp:
+ return json.load(tmp)['manifest_text']
class TmpCollectionTest(IntegrationTest):
#
# SPDX-License-Identifier: AGPL-3.0
+from builtins import range
import apiclient
import arvados
import arvados_fuse
def fake_open(self, operations, *args, **kwargs):
self.time_now += 86400*13
logger.debug('opening file at time=%f', self.time_now)
- return self.orig_open(operations, *args, **kwargs)
+ return TokenExpiryTest.orig_open(operations, *args, **kwargs)
@mock.patch.object(arvados_fuse.Operations, 'open', autospec=True)
- @mock.patch('time.time')
+ @mock.patch.object(time, 'time', return_value=0)
@mock.patch('arvados.keep.KeepClient.get')
@IntegrationTest.mount(argv=['--mount-by-id', 'zzz'])
def test_refresh_old_manifest(self, mocked_get, mocked_time, mocked_open):
# blobSignatureTtl seconds elapse between open() and
# read(). See https://dev.arvados.org/issues/10008
- mocked_get.return_value = 'fake data'
+ mocked_get.return_value = b'fake data'
mocked_time.side_effect = self.fake_time
mocked_open.side_effect = self.fake_open
#
# SPDX-License-Identifier: AGPL-3.0
+from __future__ import absolute_import
+from builtins import bytes
import arvados_fuse.unmount
import os
import subprocess
import time
import unittest
-from integration_test import IntegrationTest
+from .integration_test import IntegrationTest
class UnmountTest(IntegrationTest):
def setUp(self):
self.mnt,
'--exec', 'true'])
for m in subprocess.check_output(['mount']).splitlines():
- self.assertNotIn(' '+self.mnt+' ', m)
+ expected = bytes(' ' + self.mnt + ' ', encoding='utf-8')
+ self.assertNotIn(expected, m)
def _mounted(self, mounts):
all_mounts = subprocess.check_output(['mount'])
return [m for m in mounts
- if ' '+m+' ' in all_mounts]
+ if bytes(' ' + m + ' ', encoding='utf-8') in all_mounts]
def _wait_for_mounts(self, mounts):
deadline = time.time() + 10
linkchecker python3-virtualenv python-virtualenv xvfb iceweasel \
libgnutls28-dev python3-dev vim cadaver cython gnupg dirmngr \
libsecret-1-dev r-base r-cran-testthat libxml2-dev pandoc \
- python3-setuptools python3-pip openjdk-8-jdk bsdmainutils && \
python3-setuptools python3-pip openjdk-8-jdk bsdmainutils net-tools && \
apt-get clean
ENV RUBYVERSION_MINOR 2.3