# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0

import errno
import fcntl
import logging
import mmap
import os
import stat
import tempfile
import threading
import time
import traceback

_logger = logging.getLogger('arvados.keep')

cacheblock_suffix = ".keepcacheblock"

class DiskCacheSlot(object):
    __slots__ = ("locator", "ready", "content", "cachedir")

    def __init__(self, locator, cachedir):
        self.locator = locator
        self.ready = threading.Event()
        self.content = None
        self.cachedir = cachedir
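
    # A reader is needed to pair with set().  This is a minimal sketch,
    # assuming callers block on the ready event and then use the mapped
    # (or fallback) content; the upstream implementation may also apply
    # mmap madvise hints here.
    def get(self):
        self.ready.wait()
        return self.content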

    def set(self, value):
        tmpfile = None
        try:
            if value is None:
                self.content = None
                return

            if len(value) == 0:
                # Can't mmap a 0 length file
                self.content = b''
                return

            if self.content is not None:
                # Has been set already
                return

            blockdir = os.path.join(self.cachedir, self.locator[0:3])
            os.makedirs(blockdir, mode=0o700, exist_ok=True)

            final = os.path.join(blockdir, self.locator) + cacheblock_suffix

            f = tempfile.NamedTemporaryFile(dir=blockdir, delete=False, prefix="tmp", suffix=cacheblock_suffix)
            tmpfile = f.name
            os.chmod(tmpfile, stat.S_IRUSR | stat.S_IWUSR)

            # Acquire a shared lock: this tells other processes that
            # we're using this block, and asks them to please not delete it.
            fcntl.flock(f, fcntl.LOCK_SH)

            f.write(value)
            f.flush()
            os.rename(tmpfile, final)
            tmpfile = None

            self.content = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
        except OSError as e:
            if e.errno == errno.ENODEV:
                _logger.error("Unable to use disk cache: The underlying filesystem does not support memory mapping.")
            elif e.errno == errno.ENOMEM:
                _logger.error("Unable to use disk cache: The process's maximum number of mappings would have been exceeded.")
            elif e.errno == errno.ENOSPC:
                _logger.error("Unable to use disk cache: Out of disk space.")
            else:
                traceback.print_exc()
        except Exception:
            traceback.print_exc()
        finally:
            if tmpfile is not None:
                # If the tempfile hasn't been renamed on disk yet, try to delete it.
                try:
                    os.remove(tmpfile)
                except OSError:
                    pass
            if self.content is None:
                # Something went wrong with the disk cache; fall back
                # to RAM cache behavior (the alternative is to cache
                # nothing and return a read error).
                self.content = value
            self.ready.set()

    def size(self):
        if self.content is None:
            return 0
        else:
            return len(self.content)

    def evict(self):
        if self.content is not None and len(self.content) > 0:
            # The mmap region might be in use when we decided to evict
            # it.  This can happen if the cache is too small.
            #
            # If we call close() now, it'll throw an error if
            # something else tries to access it.
            #
            # However, we don't need to explicitly call mmap.close().
            #
            # I confirmed in mmapmodule.c that both close
            # and deallocate do the same thing:
            #
            # a) close the file descriptor
            # b) unmap the memory range
            #
            # So we can forget it in the cache and delete the file on
            # disk, and Python will tear the mapping down after any
            # other lingering references to the mapped memory are
            # gone.

            blockdir = os.path.join(self.cachedir, self.locator[0:3])
            final = os.path.join(blockdir, self.locator) + cacheblock_suffix

            try:
                with open(final, "rb") as f:
                    # Release our shared lock.
                    fcntl.flock(f, fcntl.LOCK_UN)

                    # Try to get an exclusive lock.  This ensures other
                    # processes are not using the block: it is
                    # nonblocking and will throw an exception if we
                    # can't get it, which is fine because that means
                    # we just won't try to delete it.
                    #
                    # I should note here that the file locking is not
                    # strictly necessary: we could just remove the file,
                    # and the kernel would ensure that the underlying
                    # inode remains available as long as other
                    # processes still have the file open.  However, if
                    # you have multiple processes sharing the cache
                    # and deleting each other's files, you'll end up
                    # with a bunch of ghost files that don't show up
                    # in the file system but are still taking up
                    # space, which isn't particularly user friendly.
                    # The locking strategy ensures that cache blocks
                    # in use remain visible.
                    fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)

                    os.remove(final)
                    return True
            except OSError:
                pass
            return False

    @staticmethod
    def get_from_disk(locator, cachedir):
        blockdir = os.path.join(cachedir, locator[0:3])
        final = os.path.join(blockdir, locator) + cacheblock_suffix

        try:
            filehandle = open(final, "rb")

            # Acquire a shared lock: this tells other processes that
            # we're using this block, and asks them to please not delete it.
            fcntl.flock(filehandle, fcntl.LOCK_SH)

            content = mmap.mmap(filehandle.fileno(), 0, access=mmap.ACCESS_READ)
            dc = DiskCacheSlot(locator, cachedir)
            dc.content = content
            dc.ready.set()
            return dc
        except FileNotFoundError:
            pass
        except Exception:
            traceback.print_exc()

        return None

    @staticmethod
    def init_cache(cachedir, maxslots):
        # Map in all the files in the cache directory, up to maxslots;
        # past maxslots, try to delete the excess blocks.
        #
        # This gives the calling process ownership of all the blocks.

        blocks = []
        for root, dirs, files in os.walk(cachedir):
            for name in files:
                if not name.endswith(cacheblock_suffix):
                    continue

                blockpath = os.path.join(root, name)
                res = os.stat(blockpath)

                if len(name) == (32+len(cacheblock_suffix)) and not name.startswith("tmp"):
                    blocks.append((name[0:32], res.st_atime))
                elif name.startswith("tmp") and ((time.time() - res.st_mtime) > 60):
                    # Found a temporary file more than a minute old;
                    # try to delete it.
                    try:
                        os.remove(blockpath)
                    except OSError:
                        pass

        # Sort by access time (atime), going from most recently
        # accessed (highest timestamp) to least recently accessed
        # (lowest timestamp).
        blocks.sort(key=lambda x: x[1], reverse=True)

        # Map in all the files we found, up to maxslots.  If we exceed
        # maxslots, start throwing things out.
        cachelist = []
        for b in blocks:
            got = DiskCacheSlot.get_from_disk(b[0], cachedir)
            if got is None:
                continue
            if len(cachelist) < maxslots:
                cachelist.append(got)
            else:
                # We found more blocks than maxslots; try to
                # throw the excess out of the cache.
                got.evict()

        return cachelist
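
# A minimal usage sketch, not part of the module proper: it assumes a scratch
# cache directory and derives a 32-hex-digit locator from the block's MD5
# hash (matching the filename length check in init_cache above).
if __name__ == "__main__":
    import hashlib

    demodir = tempfile.mkdtemp(prefix="keepcache-demo-")
    data = b"hello, keep"
    locator = hashlib.md5(data).hexdigest()

    slot = DiskCacheSlot(locator, demodir)
    slot.set(data)                    # writes <demodir>/<loc[0:3]>/<loc>.keepcacheblock
    assert bytes(slot.get()) == data  # the mmap region compares equal to the original

    # A fresh process would reclaim existing blocks from disk like this:
    cachelist = DiskCacheSlot.init_cache(demodir, 10)
    print([s.locator for s in cachelist])

    # evict() may return False while another handle still holds a shared lock.
    slot.evict()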