import ciso8601
import collections
import functools
+import arvados.keep
import Queue
llfuse.capi._notify_queue = Queue.Queue()
-from fusedir import sanitize_filename, Directory, CollectionDirectory, MagicDirectory, TagsDirectory, ProjectDirectory, SharedDirectory, CollectionDirectoryBase
+from fusedir import sanitize_filename, Directory, CollectionDirectory, TmpCollectionDirectory, MagicDirectory, TagsDirectory, ProjectDirectory, SharedDirectory, CollectionDirectoryBase
from fusefile import StringFile, FuseArvadosFile
_logger = logging.getLogger('arvados.arvados_fuse')
self._total -= obj.cache_size
del self._entries[obj.cache_priority]
if obj.cache_uuid:
- del self._by_uuid[obj.cache_uuid]
+ self._by_uuid[obj.cache_uuid].remove(obj)
+ if not self._by_uuid[obj.cache_uuid]:
+ del self._by_uuid[obj.cache_uuid]
obj.cache_uuid = None
if clear:
_logger.debug("InodeCache cleared %i total now %i", obj.inode, self._total)
self._entries[obj.cache_priority] = obj
obj.cache_uuid = obj.uuid()
if obj.cache_uuid:
- self._by_uuid[obj.cache_uuid] = obj
+ if obj.cache_uuid not in self._by_uuid:
+ self._by_uuid[obj.cache_uuid] = [obj]
+ else:
+ if obj not in self._by_uuid[obj.cache_uuid]:
+ self._by_uuid[obj.cache_uuid].append(obj)
self._total += obj.objsize()
- _logger.debug("InodeCache touched %i (size %i) total now %i", obj.inode, obj.objsize(), self._total)
+ _logger.debug("InodeCache touched %i (size %i) (uuid %s) total now %i", obj.inode, obj.objsize(), obj.cache_uuid, self._total)
self.cap_cache()
else:
obj.cache_priority = None
if obj.persisted() and obj.cache_priority in self._entries:
self._remove(obj, True)
def find_by_uuid(self, uuid):
    """Return the list of cached inode entries for *uuid*.

    Returns an empty list when no entry for *uuid* is cached (unlike
    the old ``find()``, which returned a single object or None —
    several inodes may now share one uuid).
    """
    return self._by_uuid.get(uuid, [])
+
def clear(self):
    """Drop every cached entry and reset size accounting to zero."""
    self._entries.clear()
    self._by_uuid.clear()
    # _total tracks the summed objsize() of cached entries; nothing is
    # cached any more, so it goes back to 0.
    self._total = 0
class Inodes(object):
"""Manage the set of inodes. This is the mapping from a numeric id
def invalidate_entry(self, inode, name):
    """Ask the kernel to drop its cached directory entry *name* under *inode*.

    Thin pass-through to ``llfuse.invalidate_entry``.
    """
    llfuse.invalidate_entry(inode, name)
def clear(self):
    """Finalize and discard every inode.

    Clears the shared inode cache first, then finalizes each entry.
    A failing finalizer is logged and does not abort the sweep.
    """
    self.inode_cache.clear()

    # Snapshot items() so a finalizer that mutates self._entries
    # cannot break the iteration.
    for inode, entry in list(self._entries.items()):
        try:
            entry.finalize()
        except Exception:
            _logger.exception("Error during finalize of inode %i", inode)

    self._entries.clear()
+
def catch_exceptions(orig_func):
"""Catch uncaught exceptions and log them consistently."""
"""
- def __init__(self, uid, gid, encoding="utf-8", inode_cache=None, num_retries=4, enable_write=False):
+ def __init__(self, uid, gid, api_client, encoding="utf-8", inode_cache=None, num_retries=4, enable_write=False):
super(Operations, self).__init__()
+ self._api_client = api_client
+
if not inode_cache:
inode_cache = InodeCache(cap=256*1024*1024)
self.inodes = Inodes(inode_cache, encoding=encoding)
# is fully initialized should wait() on this event object.
self.initlock = threading.Event()
+ # If we get overlapping shutdown events (e.g., fusermount -u
+ # -z and operations.destroy()) llfuse calls forget() on inodes
+ # that have already been deleted. To avoid this, we make
+ # forget() a no-op if called after destroy().
+ self._shutdown_started = threading.Event()
+
self.num_retries = num_retries
+ self.read_counter = arvados.keep.Counter()
+ self.write_counter = arvados.keep.Counter()
+ self.read_ops_counter = arvados.keep.Counter()
+ self.write_ops_counter = arvados.keep.Counter()
+
self.events = None
def init(self):
@catch_exceptions
def destroy(self):
    """Tear down the mount: flag shutdown, close the event feed, clear inodes.

    _shutdown_started is set first so that forget() becomes a no-op for
    any late llfuse callbacks racing with unmount.
    """
    with llfuse.lock:
        self._shutdown_started.set()
        if self.events:
            self.events.close()
            self.events = None

        # NOTE(review): indentation was lost in this paste — assuming
        # inode teardown also happens under llfuse.lock; confirm upstream.
        self.inodes.clear()
def access(self, inode, mode, ctx):
    """FUSE access() handler: unconditionally grant access.

    Permission checks are not enforced at this layer.
    """
    return True
def listen_for_events(self):
    """Subscribe to create/update/delete events using the stored API client.

    The api_client argument was dropped: the client is now supplied to
    __init__ and kept on self._api_client.
    """
    self.events = arvados.events.subscribe(
        self._api_client,
        [["event_type", "in", ["create", "update", "delete"]]],
        self.on_event)
@catch_exceptions
def on_event(self, ev):
    """Handle one websocket event from the API server.

    Invalidates and refreshes every cached inode representing the changed
    object, then does the same for the inodes of its old and new owner
    projects so directory listings stay current.
    """
    if 'event_type' not in ev:
        return
    with llfuse.lock:
        # An object may be cached under several inodes; refresh them all.
        for item in self.inodes.inode_cache.find_by_uuid(ev["object_uuid"]):
            item.invalidate()
            if ev["object_kind"] == "arvados#collection":
                new_attr = (ev.get("properties") and
                            ev["properties"].get("new_attributes") and
                            ev["properties"]["new_attributes"])

                # new_attributes.modified_at currently lacks
                # subsecond precision (see #6347) so use event_at
                # which should always be the same.
                record_version = (
                    (ev["event_at"], new_attr["portable_data_hash"])
                    if new_attr else None)

                item.update(to_record_version=record_version)
            else:
                item.update()

        # If the object moved between projects, both the old and the new
        # parent need their listings refreshed.
        oldowner = (
            ev.get("properties") and
            ev["properties"].get("old_attributes") and
            ev["properties"]["old_attributes"].get("owner_uuid"))
        newowner = ev["object_owner_uuid"]
        for parent in (
                self.inodes.inode_cache.find_by_uuid(oldowner) +
                self.inodes.inode_cache.find_by_uuid(newowner)):
            parent.invalidate()
            parent.update()
@catch_exceptions
entry = llfuse.EntryAttributes()
entry.st_ino = inode
entry.generation = 0
- entry.entry_timeout = 60
- entry.attr_timeout = 60
+ entry.entry_timeout = 60 if e.allow_dirent_cache else 0
+ entry.attr_timeout = 60 if e.allow_attr_cache else 0
entry.st_mode = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
if isinstance(e, Directory):
@catch_exceptions
def forget(self, inodes):
+ if self._shutdown_started.is_set():
+ return
for inode, nlookup in inodes:
ent = self.inodes[inode]
_logger.debug("arv-mount forget: inode %i nlookup %i ref_count %i", inode, nlookup, ent.ref_count)
@catch_exceptions
def read(self, fh, off, size):
_logger.debug("arv-mount read %i %i %i", fh, off, size)
+ self.read_ops_counter.add(1)
+
if fh in self._filehandles:
handle = self._filehandles[fh]
else:
self.inodes.touch(handle.obj)
- return handle.obj.readfrom(off, size, self.num_retries)
+ r = handle.obj.readfrom(off, size, self.num_retries)
+ if r:
+ self.read_counter.add(len(r))
+ return r
@catch_exceptions
def write(self, fh, off, buf):
_logger.debug("arv-mount write %i %i %i", fh, off, len(buf))
+ self.write_ops_counter.add(1)
+
if fh in self._filehandles:
handle = self._filehandles[fh]
else:
self.inodes.touch(handle.obj)
- return handle.obj.writeto(off, buf, self.num_retries)
+ w = handle.obj.writeto(off, buf, self.num_retries)
+ if w:
+ self.write_counter.add(w)
+ return w
@catch_exceptions
def release(self, fh):