21541: Manage memory related to project directories

diff --git a/services/fuse/arvados_fuse/fusedir.py b/services/fuse/arvados_fuse/fusedir.py
index 2b963d9a68659c342de818af52789f0d96031ef3..0671670c9d5b4a238f7f76dbc76457fdd11e808a 100644
--- a/services/fuse/arvados_fuse/fusedir.py
+++ b/services/fuse/arvados_fuse/fusedir.py
@@ -2,11 +2,6 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-from __future__ import absolute_import
-from __future__ import division
-from future.utils import viewitems
-from future.utils import itervalues
-from builtins import dict
 import apiclient
 import arvados
 import errno
@@ -31,7 +26,7 @@ _logger = logging.getLogger('arvados.arvados_fuse')
 # Match any character which FUSE or Linux cannot accommodate as part
 # of a filename. (If present in a collection filename, they will
 # appear as underscores in the fuse mount.)
-_disallowed_filename_characters = re.compile('[\x00/]')
+_disallowed_filename_characters = re.compile(r'[\x00/]')
 
 
 class Directory(FreshBase):
@@ -41,7 +36,7 @@ class Directory(FreshBase):
     and the value referencing a File or Directory object.
     """
 
-    def __init__(self, parent_inode, inodes, apiconfig):
+    def __init__(self, parent_inode, inodes, apiconfig, enable_write, filters):
         """parent_inode is the integer inode number"""
 
         super(Directory, self).__init__()
@@ -54,6 +49,20 @@ class Directory(FreshBase):
         self.apiconfig = apiconfig
         self._entries = {}
         self._mtime = time.time()
+        self._enable_write = enable_write
+        self._filters = filters or []
+
+    def _filters_for(self, subtype, *, qualified):
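+        # Yield the mount-wide filters that apply to an API request for
+        # 'subtype' records (e.g. "groups", "collections", "links").
+        # A filter whose attribute has no type prefix applies to every
+        # request and is yielded unchanged.  A prefixed filter such as
+        # (hypothetically) ['collections.properties.category', '=', 'x']
+        # is yielded only when subtype matches: unchanged if
+        # qualified=True (for groups().contents() requests), or with the
+        # prefix stripped, e.g. ['properties.category', '=', 'x'], if
+        # qualified=False (for type-specific list() requests).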
+        for f in self._filters:
+            f_type, _, f_name = f[0].partition('.')
+            if not f_name:
+                yield f
+            elif f_type != subtype:
+                pass
+            elif qualified:
+                yield f
+            else:
+                yield [f_name, *f[1:]]
 
     def forward_slash_subst(self):
         if not hasattr(self, '_fsns'):
@@ -141,6 +150,9 @@ class Directory(FreshBase):
         self.inodes.touch(self)
         super(Directory, self).fresh()
 
+    def objsize(self):
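+        # Rough estimate used for the inode cache's memory accounting:
+        # assume each directory entry costs on the order of 64 bytes.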
+        return len(self._entries) * 64
+
     def merge(self, items, fn, same, new_entry):
         """Helper method for updating the contents of the directory.
 
@@ -167,36 +179,52 @@ class Directory(FreshBase):
         changed = False
         for i in items:
             name = self.sanitize_filename(fn(i))
-            if name:
-                if name in oldentries and same(oldentries[name], i):
+            if not name:
+                continue
+            if name in oldentries:
+                ent = oldentries[name]
+                if same(ent, i):
+                    ent.inc_use()
                     # move existing directory entry over
-                    self._entries[name] = oldentries[name]
+                    self._entries[name] = ent
                     del oldentries[name]
-                else:
-                    _logger.debug("Adding entry '%s' to inode %i", name, self.inode)
-                    # create new directory entry
-                    ent = new_entry(i)
-                    if ent is not None:
-                        self._entries[name] = self.inodes.add_entry(ent)
-                        changed = True
+
+        for i in items:
+            name = self.sanitize_filename(fn(i))
+            if not name:
+                continue
+            if name not in self._entries:
+                _logger.debug("Adding entry '%s' to inode %i", name, self.inode)
+                # create new directory entry
+                ent = new_entry(i)
+                if ent is not None:
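+                    # hold a reference so the cache doesn't prune the
+                    # new entry before this update finishes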
+                    ent.inc_use()
+                    self._entries[name] = self.inodes.add_entry(ent)
+                    changed = True
 
         # delete any other directory entries that were not found in 'items'
         for i in oldentries:
             _logger.debug("Forgetting about entry '%s' on inode %i", i, self.inode)
             self.inodes.invalidate_entry(self, i)
             self.inodes.del_entry(oldentries[i])
             changed = True
 
         if changed:
             self.inodes.invalidate_inode(self)
             self._mtime = time.time()
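+            # Entry count changed: refresh this directory's size in the
+            # inode cache and prune the cache back under its limit.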
+            self.inodes.inode_cache.update_cache_size(self)
+            self.inodes.inode_cache.cap_cache()
+
+        # Release the references taken above now that the update is done.
+        for ent in self._entries.values():
+            ent.dec_use()
 
         self.fresh()
 
     def in_use(self):
         if super(Directory, self).in_use():
             return True
-        for v in itervalues(self._entries):
+        for v in self._entries.values():
             if v.in_use():
                 return True
         return False
@@ -204,7 +232,7 @@ class Directory(FreshBase):
     def has_ref(self, only_children):
         if super(Directory, self).has_ref(only_children):
             return True
-        for v in itervalues(self._entries):
+        for v in self._entries.values():
             if v.has_ref(False):
                 return True
         return False
@@ -214,7 +242,6 @@ class Directory(FreshBase):
         oldentries = self._entries
         self._entries = {}
         for n in oldentries:
-            oldentries[n].clear()
             self.inodes.del_entry(oldentries[n])
         self.invalidate()
 
@@ -226,7 +253,7 @@ class Directory(FreshBase):
         # Find self on the parent in order to invalidate this path.
         # Calling the public items() method might trigger a refresh,
         # which we definitely don't want, so read the internal dict directly.
-        for k,v in viewitems(parent._entries):
+        for k,v in parent._entries.items():
             if v is self:
                 self.inodes.invalidate_entry(parent, k)
                 break
@@ -274,10 +301,12 @@ class CollectionDirectoryBase(Directory):
 
     """
 
-    def __init__(self, parent_inode, inodes, apiconfig, collection):
-        super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig)
+    def __init__(self, parent_inode, inodes, apiconfig, enable_write, filters, collection, collection_root):
+        super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig, enable_write, filters)
         self.apiconfig = apiconfig
         self.collection = collection
+        self.collection_root = collection_root
+        self.collection_record_file = None
 
     def new_entry(self, name, item, mtime):
         name = self.sanitize_filename(name)
@@ -289,13 +318,25 @@ class CollectionDirectoryBase(Directory):
             item.fuse_entry.dead = False
             self._entries[name] = item.fuse_entry
         elif isinstance(item, arvados.collection.RichCollectionBase):
-            self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(self.inode, self.inodes, self.apiconfig, item))
+            self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(
+                self.inode,
+                self.inodes,
+                self.apiconfig,
+                self._enable_write,
+                self._filters,
+                item,
+                self.collection_root,
+            ))
             self._entries[name].populate(mtime)
         else:
-            self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime))
+            self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime, self._enable_write))
         item.fuse_entry = self._entries[name]
 
     def on_event(self, event, collection, name, item):
+        # These are ADD/DEL/MOD events emitted by the Collection
+        # object in response to operations on it (such as "mkdirs" or
+        # "remove") and to "update"; we need to mirror them in the
+        # FUSE objects that have been assigned inodes.
         if collection == self.collection:
             name = self.sanitize_filename(name)
 
@@ -340,6 +381,10 @@ class CollectionDirectoryBase(Directory):
                                 self.inodes.invalidate_inode(item.fuse_entry)
                             elif name in self._entries:
                                 self.inodes.invalidate_inode(self._entries[name])
+
+                        if self.collection_record_file is not None:
+                            self.collection_record_file.invalidate()
+                            self.inodes.invalidate_inode(self.collection_record_file)
             finally:
                 while lockcount > 0:
                     self.collection.lock.acquire()
@@ -347,33 +392,39 @@ class CollectionDirectoryBase(Directory):
 
     def populate(self, mtime):
         self._mtime = mtime
-        self.collection.subscribe(self.on_event)
-        for entry, item in viewitems(self.collection):
-            self.new_entry(entry, item, self.mtime())
+        with self.collection.lock:
+            self.collection.subscribe(self.on_event)
+            for entry, item in self.collection.items():
+                self.new_entry(entry, item, self.mtime())
 
     def writable(self):
-        return self.collection.writable()
+        return self._enable_write and self.collection.writable()
 
     @use_counter
     def flush(self):
-        with llfuse.lock_released:
-            self.collection.root_collection().save()
+        self.collection_root.flush()
 
     @use_counter
     @check_update
     def create(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.open(name, "w").close()
 
     @use_counter
     @check_update
     def mkdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.mkdirs(name)
 
     @use_counter
     @check_update
     def unlink(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.remove(name)
         self.flush()
@@ -381,6 +432,8 @@ class CollectionDirectoryBase(Directory):
     @use_counter
     @check_update
     def rmdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.remove(name)
         self.flush()
@@ -388,6 +441,9 @@ class CollectionDirectoryBase(Directory):
     @use_counter
     @check_update
     def rename(self, name_old, name_new, src):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         if not isinstance(src, CollectionDirectoryBase):
             raise llfuse.FUSEError(errno.EPERM)
 
@@ -417,12 +473,10 @@ class CollectionDirectoryBase(Directory):
 class CollectionDirectory(CollectionDirectoryBase):
     """Represents the root of a directory tree representing a collection."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, collection_record=None, explicit_collection=None):
-        super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, None)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters=None, collection_record=None, explicit_collection=None):
+        super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters, None, self)
         self.api = api
         self.num_retries = num_retries
-        self.collection_record_file = None
-        self.collection_record = None
         self._poll = True
         try:
             self._poll_time = (api._rootDesc.get('blobSignatureTtl', 60*60*2) // 2)
@@ -438,70 +492,80 @@ class CollectionDirectory(CollectionDirectoryBase):
             self._mtime = 0
         self._manifest_size = 0
         if self.collection_locator:
-            self._writable = (uuid_pattern.match(self.collection_locator) is not None)
+            self._writable = (uuid_pattern.match(self.collection_locator) is not None) and enable_write
         self._updating_lock = threading.Lock()
 
     def same(self, i):
         return i['uuid'] == self.collection_locator or i['portable_data_hash'] == self.collection_locator
 
     def writable(self):
-        return self.collection.writable() if self.collection is not None else self._writable
+        return self._enable_write and (self.collection.writable() if self.collection is not None else self._writable)
+
+    @use_counter
+    def flush(self):
+        if not self.writable():
+            return
+        with llfuse.lock_released:
+            with self._updating_lock:
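+                # If there are no uncommitted local changes, just
+                # refresh from the API server; otherwise save the local
+                # changes and then record the updated API response.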
+                if self.collection.committed():
+                    self.collection.update()
+                else:
+                    self.collection.save()
+                self.new_collection_record(self.collection.api_response())
 
     def want_event_subscribe(self):
         return (uuid_pattern.match(self.collection_locator) is not None)
 
-    # Used by arv-web.py to switch the contents of the CollectionDirectory
-    def change_collection(self, new_locator):
-        """Switch the contents of the CollectionDirectory.
-
-        Must be called with llfuse.lock held.
-        """
-
-        self.collection_locator = new_locator
-        self.collection_record = None
-        self.update()
-
     def new_collection(self, new_collection_record, coll_reader):
         if self.inode:
             self.clear()
-
-        self.collection_record = new_collection_record
-
-        if self.collection_record:
-            self._mtime = convertTime(self.collection_record.get('modified_at'))
-            self.collection_locator = self.collection_record["uuid"]
-            if self.collection_record_file is not None:
-                self.collection_record_file.update(self.collection_record)
-
         self.collection = coll_reader
+        self.new_collection_record(new_collection_record)
         self.populate(self.mtime())
 
+    def new_collection_record(self, new_collection_record):
+        if not new_collection_record:
+            raise Exception("invalid new_collection_record")
+        self._mtime = convertTime(new_collection_record.get('modified_at'))
+        self._manifest_size = len(new_collection_record["manifest_text"])
+        self.collection_locator = new_collection_record["uuid"]
+        if self.collection_record_file is not None:
+            self.collection_record_file.invalidate()
+            self.inodes.invalidate_inode(self.collection_record_file)
+            _logger.debug("%s invalidated collection record file", self)
+        self.inodes.inode_cache.update_cache_size(self)
+        self.fresh()
+
     def uuid(self):
         return self.collection_locator
 
     @use_counter
-    def update(self, to_record_version=None):
+    def update(self):
         try:
-            if self.collection_record is not None and portable_data_hash_pattern.match(self.collection_locator):
+            if self.collection is not None and portable_data_hash_pattern.match(self.collection_locator):
+                # It's immutable, nothing to update
                 return True
 
             if self.collection_locator is None:
+                # No collection locator to retrieve from
                 self.fresh()
                 return True
 
+            new_collection_record = None
             try:
                 with llfuse.lock_released:
                     self._updating_lock.acquire()
                     if not self.stale():
-                        return
+                        return True
 
-                    _logger.debug("Updating collection %s inode %s to record version %s", self.collection_locator, self.inode, to_record_version)
+                    _logger.debug("Updating collection %s inode %s", self.collection_locator, self.inode)
+                    coll_reader = None
                     if self.collection is not None:
-                        if self.collection.known_past_version(to_record_version):
-                            _logger.debug("%s already processed %s", self.collection_locator, to_record_version)
-                        else:
-                            self.collection.update()
+                        # Already have a collection object
+                        self.collection.update()
+                        new_collection_record = self.collection.api_response()
                     else:
+                        # Create a new collection object
                         if uuid_pattern.match(self.collection_locator):
                             coll_reader = arvados.collection.Collection(
                                 self.collection_locator, self.api, self.api.keep,
@@ -522,14 +586,14 @@ class CollectionDirectory(CollectionDirectoryBase):
                         if 'storage_classes_desired' not in new_collection_record:
                             new_collection_record['storage_classes_desired'] = coll_reader.storage_classes_desired()
 
-                        if self.collection_record is None or self.collection_record["portable_data_hash"] != new_collection_record.get("portable_data_hash"):
-                            self.new_collection(new_collection_record, coll_reader)
-
-                        self._manifest_size = len(coll_reader.manifest_text())
-                        _logger.debug("%s manifest_size %i", self, self._manifest_size)
                 # end with llfuse.lock_released, re-acquire lock
 
-                self.fresh()
+                if new_collection_record is not None:
+                    if coll_reader is not None:
+                        self.new_collection(new_collection_record, coll_reader)
+                    else:
+                        self.new_collection_record(new_collection_record)
+
                 return True
             finally:
                 self._updating_lock.release()
@@ -537,22 +601,29 @@ class CollectionDirectory(CollectionDirectoryBase):
             _logger.error("Error fetching collection '%s': %s", self.collection_locator, e)
         except arvados.errors.ArgumentError as detail:
             _logger.warning("arv-mount %s: error %s", self.collection_locator, detail)
-            if self.collection_record is not None and "manifest_text" in self.collection_record:
-                _logger.warning("arv-mount manifest_text is: %s", self.collection_record["manifest_text"])
+            if new_collection_record is not None and "manifest_text" in new_collection_record:
+                _logger.warning("arv-mount manifest_text is: %s", new_collection_record["manifest_text"])
         except Exception:
             _logger.exception("arv-mount %s: error", self.collection_locator)
-            if self.collection_record is not None and "manifest_text" in self.collection_record:
-                _logger.error("arv-mount manifest_text is: %s", self.collection_record["manifest_text"])
+            if new_collection_record is not None and "manifest_text" in new_collection_record:
+                _logger.error("arv-mount manifest_text is: %s", new_collection_record["manifest_text"])
         self.invalidate()
         return False
 
+    @use_counter
+    def collection_record(self):
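+        # Called by the .arvados#collection record file; flush first so
+        # the returned record reflects the current contents.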
+        self.flush()
+        return self.collection.api_response()
+
     @use_counter
     @check_update
     def __getitem__(self, item):
         if item == '.arvados#collection':
             if self.collection_record_file is None:
-                self.collection_record_file = ObjectFile(self.inode, self.collection_record)
+                self.collection_record_file = FuncToJSONFile(
+                    self.inode, self.collection_record)
                 self.inodes.add_entry(self.collection_record_file)
+            self.invalidate()  # use lookup as a signal to force update
             return self.collection_record_file
         else:
             return super(CollectionDirectory, self).__getitem__(item)
@@ -564,8 +635,9 @@ class CollectionDirectory(CollectionDirectoryBase):
             return super(CollectionDirectory, self).__contains__(k)
 
     def invalidate(self):
-        self.collection_record = None
-        self.collection_record_file = None
+        if self.collection_record_file is not None:
+            self.collection_record_file.invalidate()
+            self.inodes.invalidate_inode(self.collection_record_file)
         super(CollectionDirectory, self).invalidate()
 
     def persisted(self):
@@ -588,6 +660,7 @@ class CollectionDirectory(CollectionDirectoryBase):
             self.collection.stop_threads()
         super(CollectionDirectory, self).clear()
         self._manifest_size = 0
+        self.inodes.inode_cache.update_cache_size(self)
 
 
 class TmpCollectionDirectory(CollectionDirectoryBase):
@@ -605,40 +678,42 @@ class TmpCollectionDirectory(CollectionDirectoryBase):
         def save_new(self):
             pass
 
-    def __init__(self, parent_inode, inodes, api_client, num_retries, storage_classes=None):
+    def __init__(self, parent_inode, inodes, api_client, num_retries, enable_write, filters=None, storage_classes=None):
         collection = self.UnsaveableCollection(
             api_client=api_client,
             keep_client=api_client.keep,
             num_retries=num_retries,
             storage_classes_desired=storage_classes)
+        # This is always enable_write=True because it never tries to
+        # save to the backend
         super(TmpCollectionDirectory, self).__init__(
-            parent_inode, inodes, api_client.config, collection)
-        self.collection_record_file = None
+            parent_inode, inodes, api_client.config, True, filters, collection, self)
         self.populate(self.mtime())
 
     def on_event(self, *args, **kwargs):
         super(TmpCollectionDirectory, self).on_event(*args, **kwargs)
-        if self.collection_record_file:
+        if self.collection_record_file is None:
+            return
 
-            # See discussion in CollectionDirectoryBase.on_event
-            lockcount = 0
-            try:
-                while True:
-                    self.collection.lock.release()
-                    lockcount += 1
-            except RuntimeError:
-                pass
+        # See discussion in CollectionDirectoryBase.on_event
+        lockcount = 0
+        try:
+            while True:
+                self.collection.lock.release()
+                lockcount += 1
+        except RuntimeError:
+            pass
 
-            try:
-                with llfuse.lock:
-                    with self.collection.lock:
-                        self.collection_record_file.invalidate()
-                        self.inodes.invalidate_inode(self.collection_record_file)
-                        _logger.debug("%s invalidated collection record", self)
-            finally:
-                while lockcount > 0:
-                    self.collection.lock.acquire()
-                    lockcount -= 1
+        try:
+            with llfuse.lock:
+                with self.collection.lock:
+                    self.collection_record_file.invalidate()
+                    self.inodes.invalidate_inode(self.collection_record_file)
+                    _logger.debug("%s invalidated collection record", self)
+        finally:
+            while lockcount > 0:
+                self.collection.lock.acquire()
+                lockcount -= 1
 
     def collection_record(self):
         with llfuse.lock_released:
@@ -669,6 +744,9 @@ class TmpCollectionDirectory(CollectionDirectoryBase):
     def writable(self):
         return True
 
+    def flush(self):
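+        # Nothing to flush: a TmpCollectionDirectory is never saved to
+        # the API server.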
+        pass
+
     def want_event_subscribe(self):
         return False
 
@@ -705,8 +783,8 @@ and the directory will appear if it exists.
 
 """.lstrip()
 
-    def __init__(self, parent_inode, inodes, api, num_retries, pdh_only=False, storage_classes=None):
-        super(MagicDirectory, self).__init__(parent_inode, inodes, api.config)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters, pdh_only=False, storage_classes=None):
+        super(MagicDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters)
         self.api = api
         self.num_retries = num_retries
         self.pdh_only = pdh_only
@@ -722,7 +800,14 @@ and the directory will appear if it exists.
             # If we're the root directory, add an identical by_id subdirectory.
             if self.inode == llfuse.ROOT_INODE:
                 self._entries['by_id'] = self.inodes.add_entry(MagicDirectory(
-                        self.inode, self.inodes, self.api, self.num_retries, self.pdh_only))
+                    self.inode,
+                    self.inodes,
+                    self.api,
+                    self.num_retries,
+                    self._enable_write,
+                    self._filters,
+                    self.pdh_only,
+                ))
 
     def __contains__(self, k):
         if k in self._entries:
@@ -736,15 +821,34 @@ and the directory will appear if it exists.
 
             if group_uuid_pattern.match(k):
                 project = self.api.groups().list(
-                    filters=[['group_class', 'in', ['project','filter']], ["uuid", "=", k]]).execute(num_retries=self.num_retries)
+                    filters=[
+                        ['group_class', 'in', ['project','filter']],
+                        ["uuid", "=", k],
+                        *self._filters_for('groups', qualified=False),
+                    ],
+                ).execute(num_retries=self.num_retries)
                 if project[u'items_available'] == 0:
                     return False
                 e = self.inodes.add_entry(ProjectDirectory(
-                    self.inode, self.inodes, self.api, self.num_retries,
-                    project[u'items'][0], storage_classes=self.storage_classes))
+                    self.inode,
+                    self.inodes,
+                    self.api,
+                    self.num_retries,
+                    self._enable_write,
+                    self._filters,
+                    project[u'items'][0],
+                    storage_classes=self.storage_classes,
+                ))
             else:
                 e = self.inodes.add_entry(CollectionDirectory(
-                        self.inode, self.inodes, self.api, self.num_retries, k))
+                    self.inode,
+                    self.inodes,
+                    self.api,
+                    self.num_retries,
+                    self._enable_write,
+                    self._filters,
+                    k,
+                ))
 
             if e.update():
                 if k not in self._entries:
@@ -778,8 +882,8 @@ and the directory will appear if it exists.
 class TagsDirectory(Directory):
     """A special directory that contains as subdirectories all tags visible to the user."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, poll_time=60):
-        super(TagsDirectory, self).__init__(parent_inode, inodes, api.config)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters, poll_time=60):
+        super(TagsDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters)
         self.api = api
         self.num_retries = num_retries
         self._poll = True
@@ -793,14 +897,32 @@ class TagsDirectory(Directory):
     def update(self):
         with llfuse.lock_released:
             tags = self.api.links().list(
-                filters=[['link_class', '=', 'tag'], ["name", "!=", ""]],
-                select=['name'], distinct=True, limit=1000
-                ).execute(num_retries=self.num_retries)
+                filters=[
+                    ['link_class', '=', 'tag'],
+                    ['name', '!=', ''],
+                    *self._filters_for('links', qualified=False),
+                ],
+                select=['name'],
+                distinct=True,
+                limit=1000,
+            ).execute(num_retries=self.num_retries)
         if "items" in tags:
-            self.merge(tags['items']+[{"name": n} for n in self._extra],
-                       lambda i: i['name'],
-                       lambda a, i: a.tag == i['name'],
-                       lambda i: TagDirectory(self.inode, self.inodes, self.api, self.num_retries, i['name'], poll=self._poll, poll_time=self._poll_time))
+            self.merge(
+                tags['items']+[{"name": n} for n in self._extra],
+                lambda i: i['name'],
+                lambda a, i: a.tag == i['name'],
+                lambda i: TagDirectory(
+                    self.inode,
+                    self.inodes,
+                    self.api,
+                    self.num_retries,
+                    self._enable_write,
+                    self._filters,
+                    i['name'],
+                    poll=self._poll,
+                    poll_time=self._poll_time,
+                ),
+            )
 
     @use_counter
     @check_update
@@ -809,7 +931,12 @@ class TagsDirectory(Directory):
             return super(TagsDirectory, self).__getitem__(item)
         with llfuse.lock_released:
             tags = self.api.links().list(
-                filters=[['link_class', '=', 'tag'], ['name', '=', item]], limit=1
+                filters=[
+                    ['link_class', '=', 'tag'],
+                    ['name', '=', item],
+                    *self._filters_for('links', qualified=False),
+                ],
+                limit=1,
             ).execute(num_retries=self.num_retries)
         if tags["items"]:
             self._extra.add(item)
@@ -834,9 +961,9 @@ class TagDirectory(Directory):
     to the user that are tagged with a particular tag.
     """
 
-    def __init__(self, parent_inode, inodes, api, num_retries, tag,
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters, tag,
                  poll=False, poll_time=60):
-        super(TagDirectory, self).__init__(parent_inode, inodes, api.config)
+        super(TagDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters)
         self.api = api
         self.num_retries = num_retries
         self.tag = tag
@@ -850,23 +977,36 @@ class TagDirectory(Directory):
     def update(self):
         with llfuse.lock_released:
             taggedcollections = self.api.links().list(
-                filters=[['link_class', '=', 'tag'],
-                         ['name', '=', self.tag],
-                         ['head_uuid', 'is_a', 'arvados#collection']],
-                select=['head_uuid']
-                ).execute(num_retries=self.num_retries)
-        self.merge(taggedcollections['items'],
-                   lambda i: i['head_uuid'],
-                   lambda a, i: a.collection_locator == i['head_uuid'],
-                   lambda i: CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i['head_uuid']))
+                filters=[
+                    ['link_class', '=', 'tag'],
+                    ['name', '=', self.tag],
+                    ['head_uuid', 'is_a', 'arvados#collection'],
+                    *self._filters_for('links', qualified=False),
+                ],
+                select=['head_uuid'],
+            ).execute(num_retries=self.num_retries)
+        self.merge(
+            taggedcollections['items'],
+            lambda i: i['head_uuid'],
+            lambda a, i: a.collection_locator == i['head_uuid'],
+            lambda i: CollectionDirectory(
+                self.inode,
+                self.inodes,
+                self.api,
+                self.num_retries,
+                self._enable_write,
+                self._filters,
+                i['head_uuid'],
+            ),
+        )
 
 
 class ProjectDirectory(Directory):
     """A special directory that contains the contents of a project."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, project_object,
-                 poll=True, poll_time=3, storage_classes=None):
-        super(ProjectDirectory, self).__init__(parent_inode, inodes, api.config)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters,
+                 project_object, poll=True, poll_time=3, storage_classes=None):
+        super(ProjectDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters)
         self.api = api
         self.num_retries = num_retries
         self.project_object = project_object
@@ -883,13 +1023,14 @@ class ProjectDirectory(Directory):
         return True
 
     def createDirectory(self, i):
+        common_args = (self.inode, self.inodes, self.api, self.num_retries, self._enable_write, self._filters)
         if collection_uuid_pattern.match(i['uuid']):
-            return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i)
+            return CollectionDirectory(*common_args, i)
         elif group_uuid_pattern.match(i['uuid']):
-            return ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i, self._poll, self._poll_time, self.storage_classes)
+            return ProjectDirectory(*common_args, i, self._poll, self._poll_time, self.storage_classes)
         elif link_uuid_pattern.match(i['uuid']):
             if i['head_kind'] == 'arvados#collection' or portable_data_hash_pattern.match(i['head_uuid']):
-                return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i['head_uuid'])
+                return CollectionDirectory(*common_args, i['head_uuid'])
             else:
                 return None
         elif uuid_pattern.match(i['uuid']):
@@ -939,10 +1080,14 @@ class ProjectDirectory(Directory):
 
         try:
             with llfuse.lock_released:
+                _logger.debug("Getting lock to update %s", self.project_uuid)
                 self._updating_lock.acquire()
                 if not self.stale():
+                    _logger.debug("%s was updated already", self.project_uuid)
                     return
 
+                _logger.debug("Requesting update of %s", self.project_uuid)
+
                 if group_uuid_pattern.match(self.project_uuid):
                     self.project_object = self.api.groups().get(
                         uuid=self.project_uuid).execute(num_retries=self.num_retries)
@@ -950,18 +1095,27 @@ class ProjectDirectory(Directory):
                     self.project_object = self.api.users().get(
                         uuid=self.project_uuid).execute(num_retries=self.num_retries)
                 # do this in 2 steps until #17424 is fixed
-                contents = list(arvados.util.keyset_list_all(self.api.groups().contents,
-                                                        order_key="uuid",
-                                                        num_retries=self.num_retries,
-                                                        uuid=self.project_uuid,
-                                                        filters=[["uuid", "is_a", "arvados#group"],
-                                                                 ["groups.group_class", "in", ["project","filter"]]]))
-                contents.extend(arvados.util.keyset_list_all(self.api.groups().contents,
-                                                             order_key="uuid",
-                                                             num_retries=self.num_retries,
-                                                             uuid=self.project_uuid,
-                                                             filters=[["uuid", "is_a", "arvados#collection"]]))
-
+                contents = list(arvados.util.keyset_list_all(
+                    self.api.groups().contents,
+                    order_key='uuid',
+                    num_retries=self.num_retries,
+                    uuid=self.project_uuid,
+                    filters=[
+                        ['uuid', 'is_a', 'arvados#group'],
+                        ['groups.group_class', 'in', ['project', 'filter']],
+                        *self._filters_for('groups', qualified=True),
+                    ],
+                ))
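+                # Skip rows that represent superseded collection
+                # versions: only the current version of a collection
+                # has current_version_uuid == uuid.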
+                contents.extend(obj for obj in arvados.util.keyset_list_all(
+                    self.api.groups().contents,
+                    order_key='uuid',
+                    num_retries=self.num_retries,
+                    uuid=self.project_uuid,
+                    filters=[
+                        ['uuid', 'is_a', 'arvados#collection'],
+                        *self._filters_for('collections', qualified=True),
+                    ],
+                ) if obj['current_version_uuid'] == obj['uuid'])
             # end with llfuse.lock_released, re-acquire lock
 
             self.merge(contents,
@@ -990,14 +1144,24 @@ class ProjectDirectory(Directory):
                 namefilter = ["name", "=", k]
             else:
                 namefilter = ["name", "in", [k, k2]]
-            contents = self.api.groups().list(filters=[["owner_uuid", "=", self.project_uuid],
-                                                       ["group_class", "in", ["project","filter"]],
-                                                       namefilter],
-                                              limit=2).execute(num_retries=self.num_retries)["items"]
+            contents = self.api.groups().list(
+                filters=[
+                    ["owner_uuid", "=", self.project_uuid],
+                    ["group_class", "in", ["project","filter"]],
+                    namefilter,
+                    *self._filters_for('groups', qualified=False),
+                ],
+                limit=2,
+            ).execute(num_retries=self.num_retries)["items"]
             if not contents:
-                contents = self.api.collections().list(filters=[["owner_uuid", "=", self.project_uuid],
-                                                                namefilter],
-                                                       limit=2).execute(num_retries=self.num_retries)["items"]
+                contents = self.api.collections().list(
+                    filters=[
+                        ["owner_uuid", "=", self.project_uuid],
+                        namefilter,
+                        *self._filters_for('collections', qualified=False),
+                    ],
+                    limit=2,
+                ).execute(num_retries=self.num_retries)["items"]
         if contents:
             if len(contents) > 1 and contents[1]['name'] == k:
                 # If "foo/bar" and "foo[SUBST]bar" both exist, use
@@ -1024,6 +1188,8 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def writable(self):
+        if not self._enable_write:
+            return False
         with llfuse.lock_released:
             if not self._current_user:
                 self._current_user = self.api.users().current().execute(num_retries=self.num_retries)
@@ -1035,6 +1201,9 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def mkdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         try:
             with llfuse.lock_released:
                 c = {
@@ -1055,6 +1224,9 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def rmdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         if name not in self:
             raise llfuse.FUSEError(errno.ENOENT)
         if not isinstance(self[name], CollectionDirectory):
@@ -1068,6 +1240,9 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def rename(self, name_old, name_new, src):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         if not isinstance(src, ProjectDirectory):
             raise llfuse.FUSEError(errno.EPERM)
 
@@ -1140,9 +1315,9 @@ class ProjectDirectory(Directory):
 class SharedDirectory(Directory):
     """A special directory that represents users or groups who have shared projects with me."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, exclude,
-                 poll=False, poll_time=60, storage_classes=None):
-        super(SharedDirectory, self).__init__(parent_inode, inodes, api.config)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters,
+                 exclude, poll=False, poll_time=60, storage_classes=None):
+        super(SharedDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters)
         self.api = api
         self.num_retries = num_retries
         self.current_user = api.users().current().execute(num_retries=num_retries)
@@ -1168,11 +1343,17 @@ class SharedDirectory(Directory):
                 if 'httpMethod' in methods.get('shared', {}):
                     page = []
                     while True:
-                        resp = self.api.groups().shared(filters=[['group_class', 'in', ['project','filter']]]+page,
-                                                        order="uuid",
-                                                        limit=10000,
-                                                        count="none",
-                                                        include="owner_uuid").execute()
+                        resp = self.api.groups().shared(
+                            filters=[
+                                ['group_class', 'in', ['project','filter']],
+                                *page,
+                                *self._filters_for('groups', qualified=False),
+                            ],
+                            order="uuid",
+                            limit=10000,
+                            count="none",
+                            include="owner_uuid",
+                        ).execute()
                         if not resp["items"]:
                             break
                         page = [["uuid", ">", resp["items"][len(resp["items"])-1]["uuid"]]]
@@ -1187,8 +1368,12 @@ class SharedDirectory(Directory):
                         self.api.groups().list,
                         order_key="uuid",
                         num_retries=self.num_retries,
-                        filters=[['group_class','in',['project','filter']]],
-                        select=["uuid", "owner_uuid"]))
+                        filters=[
+                            ['group_class', 'in', ['project','filter']],
+                            *self._filters_for('groups', qualified=False),
+                        ],
+                        select=["uuid", "owner_uuid"],
+                    ))
                     for ob in all_projects:
                         objects[ob['uuid']] = ob
 
@@ -1202,13 +1387,20 @@ class SharedDirectory(Directory):
                         self.api.users().list,
                         order_key="uuid",
                         num_retries=self.num_retries,
-                        filters=[['uuid','in', list(root_owners)]])
+                        filters=[
+                            ['uuid', 'in', list(root_owners)],
+                            *self._filters_for('users', qualified=False),
+                        ],
+                    )
                     lgroups = arvados.util.keyset_list_all(
                         self.api.groups().list,
                         order_key="uuid",
                         num_retries=self.num_retries,
-                        filters=[['uuid','in', list(root_owners)+roots]])
-
+                        filters=[
+                            ['uuid', 'in', list(root_owners)+roots],
+                            *self._filters_for('groups', qualified=False),
+                        ],
+                    )
                     for l in lusers:
                         objects[l["uuid"]] = l
                     for l in lgroups:
@@ -1230,10 +1422,23 @@ class SharedDirectory(Directory):
 
             # end with llfuse.lock_released, re-acquire lock
 
-            self.merge(viewitems(contents),
-                       lambda i: i[0],
-                       lambda a, i: a.uuid() == i[1]['uuid'],
-                       lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i[1], poll=self._poll, poll_time=self._poll_time, storage_classes=self.storage_classes))
+            self.merge(
+                contents.items(),
+                lambda i: i[0],
+                lambda a, i: a.uuid() == i[1]['uuid'],
+                lambda i: ProjectDirectory(
+                    self.inode,
+                    self.inodes,
+                    self.api,
+                    self.num_retries,
+                    self._enable_write,
+                    self._filters,
+                    i[1],
+                    poll=self._poll,
+                    poll_time=self._poll_time,
+                    storage_classes=self.storage_classes,
+                ),
+            )
         except Exception:
             _logger.exception("arv-mount shared dir error")
         finally: