def set(self, slot, blob):
    """Store *blob* into disk-cache *slot*, shrinking the cache and retrying once on failure.

    On ENOMEM the slot budget is reduced; on ENOSPC the disk budget is
    reduced by 256 MiB; on ENODEV (filesystem does not support mmap) an
    error is logged.  After any failure the cache is capped and the write
    is retried once.  A second failure raises
    arvados.errors.KeepCacheError.  In every case ``slot.ready`` is set
    before returning so waiters are released.

    :param slot: cache slot object providing ``set(blob)``, ``locator``
        and a ``ready`` event.
    :param blob: the block data to store.
    :raises arvados.errors.KeepCacheError: if the block cannot be saved
        even after evicting slots.
    """
    tryagain = False

    try:
        slot.set(blob)
    except OSError as e:
        tryagain = True
        if e.errno == errno.ENOMEM:
            # Reduce max slots to current - 4, cap cache and retry
            with self._cache_lock:
                self._max_slots = max(4, len(self._cache) - 4)
        elif e.errno == errno.ENOSPC:
            # Reduce disk max space to current - 256 MiB, cap cache and retry
            # NOTE(review): assumes iterating self._cache yields slot
            # objects with a size() method — confirm container type.
            with self._cache_lock:
                sm = sum(st.size() for st in self._cache)
                self.cache_max = max((256 * 1024 * 1024), sm - (256 * 1024 * 1024))
        elif e.errno == errno.ENODEV:
            _logger.error("Unable to use disk cache: The underlying filesystem does not support memory mapping.")
    except Exception:
        # Unexpected error: still worth one retry after evicting slots,
        # but record what happened instead of swallowing it silently.
        _logger.debug("Unexpected error saving block to disk cache", exc_info=True)
        tryagain = True

    try:
        if tryagain:
            # There was an error.  Evict some slots and try again.
            self.cap_cache()
            slot.set(blob)
    except Exception as e:
        # It failed again.  Give up, chaining the underlying error for
        # diagnosis.
        raise arvados.errors.KeepCacheError("Unable to save block %s to disk cache: %s" % (slot.locator, e)) from e
    finally:
        # Set the notice that we are done with the cache slot one way
        # or another, so anyone waiting on slot.ready is released.
        slot.ready.set()

    self.cap_cache()