import collections
import functools
+import Queue
+
+# The default llfuse _notify_queue is capped at 1000 items, but it needs to be
+# unlimited to avoid deadlocks; see https://arvados.org/issues/3198#note-43 for
+# details.
+
+llfuse.capi._notify_queue = Queue.Queue()
+
from fusedir import sanitize_filename, Directory, CollectionDirectory, MagicDirectory, TagsDirectory, ProjectDirectory, SharedDirectory, CollectionDirectoryBase
from fusefile import StringFile, FuseArvadosFile
self.obj.dec_use()
def flush(self):
- return self.obj.flush()
+ if self.obj.writable():
+ return self.obj.flush()
class FileHandle(Handle):
self._total -= obj.cache_size
del self._entries[obj.cache_priority]
if obj.cache_uuid:
- del self._by_uuid[obj.cache_uuid]
+ self._by_uuid[obj.cache_uuid].remove(obj)
+ if not self._by_uuid[obj.cache_uuid]:
+ del self._by_uuid[obj.cache_uuid]
obj.cache_uuid = None
if clear:
_logger.debug("InodeCache cleared %i total now %i", obj.inode, self._total)
self._entries[obj.cache_priority] = obj
obj.cache_uuid = obj.uuid()
if obj.cache_uuid:
- self._by_uuid[obj.cache_uuid] = obj
+ if obj.cache_uuid not in self._by_uuid:
+ self._by_uuid[obj.cache_uuid] = [obj]
+ else:
+ if obj not in self._by_uuid[obj.cache_uuid]:
+ self._by_uuid[obj.cache_uuid].append(obj)
self._total += obj.objsize()
- _logger.debug("InodeCache touched %i (size %i) total now %i", obj.inode, obj.objsize(), self._total)
+ _logger.debug("InodeCache touched %i (size %i) (uuid %s) total now %i", obj.inode, obj.objsize(), obj.cache_uuid, self._total)
self.cap_cache()
else:
obj.cache_priority = None
def find(self, uuid):
return self._by_uuid.get(uuid)
+ def clear(self):
+ self._entries.clear()
+ self._by_uuid.clear()
+ self._total = 0
+
class Inodes(object):
"""Manage the set of inodes. This is the mapping from a numeric id
to a concrete File or Directory object"""
self._counter = itertools.count(llfuse.ROOT_INODE)
self.inode_cache = inode_cache
self.encoding = encoding
+ self.deferred_invalidations = []
def __getitem__(self, item):
return self._entries[item]
def del_entry(self, entry):
if entry.ref_count == 0:
- _logger.debug("Deleting inode %i", entry.inode)
self.inode_cache.unmanage(entry)
- llfuse.invalidate_inode(entry.inode)
- entry.finalize()
del self._entries[entry.inode]
+ with llfuse.lock_released:
+ entry.finalize()
+ self.invalidate_inode(entry.inode)
entry.inode = None
else:
entry.dead = True
_logger.debug("del_entry on inode %i with refcount %i", entry.inode, entry.ref_count)
+ def invalidate_inode(self, inode):
+ llfuse.invalidate_inode(inode)
+
+ def invalidate_entry(self, inode, name):
+ llfuse.invalidate_entry(inode, name)
+
+ def clear(self):
+ self.inode_cache.clear()
+
+ for k,v in self._entries.items():
+ try:
+ v.finalize()
+ except Exception as e:
+ _logger.exception("Error during finalize of inode %i", k)
+
+ self._entries.clear()
+
def catch_exceptions(orig_func):
"""Catch uncaught exceptions and log them consistently."""
raise
except EnvironmentError as e:
raise llfuse.FUSEError(e.errno)
+ except arvados.errors.KeepWriteError as e:
+ _logger.error("Keep write error: " + str(e))
+ raise llfuse.FUSEError(errno.EIO)
+ except arvados.errors.NotFoundError as e:
+ _logger.error("Block not found error: " + str(e))
+ raise llfuse.FUSEError(errno.EIO)
except:
_logger.exception("Unhandled exception during FUSE operation")
raise llfuse.FUSEError(errno.EIO)
"""
- def __init__(self, uid, gid, encoding="utf-8", inode_cache=None, num_retries=4):
+ def __init__(self, uid, gid, encoding="utf-8", inode_cache=None, num_retries=4, enable_write=False):
super(Operations, self).__init__()
if not inode_cache:
self.inodes = Inodes(inode_cache, encoding=encoding)
self.uid = uid
self.gid = gid
+ self.enable_write = enable_write
# dict of inode to filehandle
self._filehandles = {}
self.events.close()
self.events = None
- for k,v in self.inodes.items():
- v.finalize()
- self.inodes = None
+ self.inodes.clear()
def access(self, inode, mode, ctx):
return True
def on_event(self, ev):
if 'event_type' in ev:
with llfuse.lock:
- item = self.inodes.inode_cache.find(ev["object_uuid"])
- if item is not None:
- item.invalidate()
- if ev["object_kind"] == "arvados#collection":
- new_attr = ev.get("properties") and ev["properties"].get("new_attributes") and ev["properties"]["new_attributes"]
- record_version = (new_attr["modified_at"], new_attr["portable_data_hash"]) if new_attr else None
- item.update(to_record_version=record_version)
- else:
- item.update()
+ items = self.inodes.inode_cache.find(ev["object_uuid"])
+ if items is not None:
+ for item in items:
+ item.invalidate()
+ if ev["object_kind"] == "arvados#collection":
+ new_attr = ev.get("properties") and ev["properties"].get("new_attributes") and ev["properties"]["new_attributes"]
+
+                            # new_attributes.modified_at currently lacks subsecond precision (see #6347),
+                            # so use event_at, which should always carry the same timestamp.
+ #record_version = (new_attr["modified_at"], new_attr["portable_data_hash"]) if new_attr else None
+ record_version = (ev["event_at"], new_attr["portable_data_hash"]) if new_attr else None
+
+ item.update(to_record_version=record_version)
+ else:
+ item.update()
oldowner = ev.get("properties") and ev["properties"].get("old_attributes") and ev["properties"]["old_attributes"].get("owner_uuid")
olditemparent = self.inodes.inode_cache.find(oldowner)
itemparent.invalidate()
itemparent.update()
+
@catch_exceptions
def getattr(self, inode):
if inode not in self.inodes:
if isinstance(e, FuseArvadosFile):
entry.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
- if e.writable():
+ if self.enable_write and e.writable():
entry.st_mode |= stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
entry.st_nlink = 1
@catch_exceptions
def read(self, fh, off, size):
_logger.debug("arv-mount read %i %i %i", fh, off, size)
-
if fh in self._filehandles:
handle = self._filehandles[fh]
else:
self.inodes.touch(handle.obj)
- try:
- return handle.obj.readfrom(off, size, self.num_retries)
- except arvados.errors.NotFoundError as e:
- _logger.error("Block not found: " + str(e))
- raise llfuse.FUSEError(errno.EIO)
+ return handle.obj.readfrom(off, size, self.num_retries)
@catch_exceptions
def write(self, fh, off, buf):
if fh in self._filehandles:
try:
self._filehandles[fh].flush()
- except EnvironmentError as e:
- raise llfuse.FUSEError(e.errno)
except Exception:
- _logger.exception("Flush error")
- self._filehandles[fh].release()
- del self._filehandles[fh]
+ raise
+ finally:
+ self._filehandles[fh].release()
+ del self._filehandles[fh]
self.inodes.inode_cache.cap_cache()
def releasedir(self, fh):
return st
def _check_writable(self, inode_parent):
+ if not self.enable_write:
+ raise llfuse.FUSEError(errno.EROFS)
+
if inode_parent in self.inodes:
p = self.inodes[inode_parent]
else:
@catch_exceptions
def create(self, inode_parent, name, mode, flags, ctx):
+ _logger.debug("arv-mount create: %i '%s' %o", inode_parent, name, mode)
+
p = self._check_writable(inode_parent)
p.create(name)