# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

import threading
import mmap
import os
import traceback
import stat
import tempfile
import fcntl
import time
import errno
import logging

_logger = logging.getLogger('arvados.keep')

cacheblock_suffix = ".keepcacheblock"
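
# Cache blocks are stored on disk as
# <cachedir>/<first 3 hex digits of the locator>/<md5 locator>.keepcacheblock
# so a block's file can be located directly from its locator without
# scanning the whole cache directory.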


class DiskCacheSlot(object):
    __slots__ = ("locator", "ready", "content", "cachedir")
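    # locator: hash of the Keep block held by this slot
    # ready: set once content is available (or the write has fallen
    #   back to RAM-cache behavior)
    # content: an mmap of the on-disk cache file, or plain bytes when
    #   falling back to RAM-cache behavior
    # cachedir: root directory of the disk cache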

    def __init__(self, locator, cachedir):
        self.locator = locator
        self.ready = threading.Event()
        self.content = None
        self.cachedir = cachedir

    def set(self, value):
        tmpfile = None
        try:
            if len(value) == 0:
                # Can't mmap a 0 length file
                self.content = b''
                return
            if self.content is not None:
                # Has been set already
                return

            blockdir = os.path.join(self.cachedir, self.locator[0:3])
            os.makedirs(blockdir, mode=0o700, exist_ok=True)

            final = os.path.join(blockdir, self.locator) + cacheblock_suffix

            f = tempfile.NamedTemporaryFile(dir=blockdir, delete=False, prefix="tmp", suffix=cacheblock_suffix)
            tmpfile = f.name
            os.chmod(tmpfile, stat.S_IRUSR | stat.S_IWUSR)

            # acquire a shared lock, this tells other processes that
            # we're using this block and to please not delete it.
            fcntl.flock(f, fcntl.LOCK_SH)

            f.write(value)
            f.flush()
            os.rename(tmpfile, final)
            tmpfile = None

            self.content = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
        except OSError as e:
            if e.errno == errno.ENODEV:
                _logger.error("Unable to use disk cache: The underlying filesystem does not support memory mapping.")
            elif e.errno == errno.ENOMEM:
                _logger.error("Unable to use disk cache: The process's maximum number of mappings would have been exceeded.")
            elif e.errno == errno.ENOSPC:
                _logger.error("Unable to use disk cache: Out of disk space.")
            else:
                traceback.print_exc()
        except Exception as e:
            traceback.print_exc()
        finally:
            if tmpfile is not None:
                # If the tempfile hasn't been renamed on disk yet, try to delete it.
                try:
                    os.remove(tmpfile)
                except OSError:
                    pass
            if self.content is None:
                # Something went wrong with the disk cache, fall back
                # to RAM cache behavior (the alternative is to cache
                # nothing and return a read error).
                self.content = value
            self.ready.set()

    def size(self):
        if self.content is None:
            return 0
        return len(self.content)

    def evict(self):
        if self.content is not None and len(self.content) > 0:
            # The mmap region might be in use when we decided to evict
            # it.  This can happen if the cache is too small.
            #
            # If we call close() now, it'll throw an error if
            # something tries to access it.
            #
            # However, we don't need to explicitly call mmap.close().
            # I confirmed in mmapmodule.c that both close
            # and deallocate do the same thing:
            #
            # a) close the file descriptor
            # b) unmap the memory range
            #
            # So we can forget it in the cache and delete the file on
            # disk, and it will tear it down after any other
            # lingering Python references to the mapped memory are
            # gone.

            blockdir = os.path.join(self.cachedir, self.locator[0:3])
            final = os.path.join(blockdir, self.locator) + cacheblock_suffix

            try:
                with open(final, "rb") as f:
                    # drop our shared lock
                    fcntl.flock(f, fcntl.LOCK_UN)

                    # try to get an exclusive lock, this ensures other
                    # processes are not using the block.  It is
                    # nonblocking and will throw an exception if we
                    # can't get it, which is fine because that means
                    # we just won't try to delete it.
                    #
                    # I should note here, the file locking is not
                    # strictly necessary, we could just remove it and
                    # the kernel would ensure that the underlying
                    # inode remains available as long as other
                    # processes still have the file open.  However, if
                    # you have multiple processes sharing the cache
                    # and deleting each other's files, you'll end up
                    # with a bunch of ghost files that don't show up
                    # in the file system but are still taking up
                    # space, which isn't particularly user friendly.
                    # The locking strategy ensures that cache blocks
                    # in use remain visible.
                    fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)

                    # We have the exclusive lock, so it is safe to
                    # delete the cache file.
                    os.remove(final)
            except OSError:
                pass

    @staticmethod
    def get_from_disk(locator, cachedir):
        blockdir = os.path.join(cachedir, locator[0:3])
        final = os.path.join(blockdir, locator) + cacheblock_suffix

        try:
            filehandle = open(final, "rb")

            # acquire a shared lock, this tells other processes that
            # we're using this block and to please not delete it.
            fcntl.flock(filehandle, fcntl.LOCK_SH)

            content = mmap.mmap(filehandle.fileno(), 0, access=mmap.ACCESS_READ)
            dc = DiskCacheSlot(locator, cachedir)
            dc.content = content
            dc.ready.set()
            return dc
        except FileNotFoundError:
            pass
        except Exception as e:
            traceback.print_exc()

        return None

    @staticmethod
    def init_cache(cachedir, maxslots):
        # map in all the files in the cache directory, up to max slots.
        # after max slots, try to delete the excess blocks.
        #
        # this gives the calling process ownership of all the blocks
        blocks = []
        for root, dirs, files in os.walk(cachedir):
            for name in files:
                if not name.endswith(cacheblock_suffix):
                    continue

                blockpath = os.path.join(root, name)
                res = os.stat(blockpath)

                if len(name) == (32+len(cacheblock_suffix)) and not name.startswith("tmp"):
                    blocks.append((name[0:32], res.st_atime))
                elif name.startswith("tmp") and ((time.time() - res.st_mtime) > 60):
                    # found a temporary file more than 1 minute old,
                    # try to delete it.
                    try:
                        os.remove(blockpath)
                    except OSError:
                        pass

        # sort by access time (atime), going from most recently
        # accessed (highest timestamp) to least recently accessed
        # (lowest timestamp).
        blocks.sort(key=lambda x: x[1], reverse=True)

        # Map in all the files we found, up to maxslots; if we exceed
        # maxslots, start throwing things out.
        cachelist = []
        for b in blocks:
            got = DiskCacheSlot.get_from_disk(b[0], cachedir)
            if got is None:
                continue
            if len(cachelist) < maxslots:
                cachelist.append(got)
            else:
                # we found more blocks than maxslots, try to
                # throw this one out of the cache.
                got.evict()

        return cachelist
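
# A rough usage sketch (hypothetical names; the actual caller,
# presumably the Keep client's block cache, is not part of this file):
#
#   slots = DiskCacheSlot.init_cache(cachedir, maxslots)
#   slot = DiskCacheSlot(locator, cachedir)
#   slot.set(block_data)   # write <locator>.keepcacheblock and mmap it
#   slot.evict()           # delete the file if no other process holds a lock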