1 # Copyright (C) The Arvados Authors. All rights reserved.
3 # SPDX-License-Identifier: Apache-2.0
# Logger shared with the rest of the Arvados keep client code.
_logger = logging.getLogger('arvados.keep')

# Filename suffix identifying block files that belong to this disk
# cache; used both when creating cache files and when filtering
# directory scans of cachedir.
cacheblock_suffix = ".keepcacheblock"
class DiskCacheSlot(object):
    """A single cached Keep block, backed by an mmap'd file on disk."""

    # __slots__ keeps per-instance overhead low (a cache may hold many
    # slots) and fixes the attribute set:
    #   locator    - block locator; its first 3 chars pick the subdirectory
    #   ready      - threading.Event, set once content is available
    #   content    - None, an empty byte string, or an mmap region
    #   cachedir   - root directory of the on-disk cache
    #   filehandle - open file object holding an fcntl lock on the block file
    #   linger     - NOTE(review): appears to be a weak reference to
    #                evicted content (it is called and compared against
    #                None elsewhere in this class) -- confirm
    __slots__ = ("locator", "ready", "content", "cachedir", "filehandle", "linger")
25 def __init__(self, locator, cachedir):
26 self.locator = locator
27 self.ready = threading.Event()
29 self.cachedir = cachedir
30 self.filehandle = None
        # NOTE(review): this excerpt omits the enclosing method's `def`
        # line and some surrounding statements; only comments are
        # changed here.

        # 'content' can be None, an empty byte string, or a nonempty
        # mmap region.  If it is an mmap region, we want to advise the
        # kernel we're going to use it.  This nudges the kernel to
        # re-read most or all of the block if necessary (instead of
        # just a few pages at a time), reducing the number of page
        # faults and improving performance by 4x compared to not
        # calling madvise at all.
        self.content.madvise(mmap.MADV_WILLNEED)
        # NOTE(review): this excerpt omits the enclosing method's `def`
        # line and several statements (the bodies of the conditionals
        # below and the surrounding try/except/cleanup); only comments
        # are changed here.

        # Can't mmap a 0 length file
        if self.content is not None:
            # Has been set already
        # Cache files live in a subdirectory named after the first
        # three characters of the locator.
        blockdir = os.path.join(self.cachedir, self.locator[0:3])
        os.makedirs(blockdir, mode=0o700, exist_ok=True)

        final = os.path.join(blockdir, self.locator) + cacheblock_suffix

        # Write to a temp file first, then rename over `final`, so a
        # partially-written block is never visible under its real name.
        self.filehandle = tempfile.NamedTemporaryFile(dir=blockdir, delete=False, prefix="tmp", suffix=cacheblock_suffix)
        tmpfile = self.filehandle.name
        os.chmod(tmpfile, stat.S_IRUSR | stat.S_IWUSR)

        # Acquire a shared lock, this tells other processes that
        # we're using this block and to please not delete it.
        fcntl.flock(self.filehandle, fcntl.LOCK_SH)

        self.filehandle.write(value)
        self.filehandle.flush()
        os.rename(tmpfile, final)

        self.content = mmap.mmap(self.filehandle.fileno(), 0, access=mmap.ACCESS_READ)
        # only set the event when mmap is successful

        if tmpfile is not None:
            # If the tempfile hasn't been renamed on disk yet, try to delete it.
        # NOTE(review): fragment -- the enclosing `def` line, the
        # bodies of the inner conditionals, and the return path for the
        # content-is-None case are missing from this excerpt; only
        # comments are changed here.
        if self.content is None:
            if self.linger is not None:
                # If it is still lingering (object is still accessible
                # through the weak reference) it is still taking up
                # space.
                content = self.linger()
                if content is not None:
        return len(self.content)
        # NOTE(review): fragment -- the enclosing `def` line, the body
        # of the guard below, the try/except around the exclusive-lock
        # attempt, and the actual file removal/close are missing from
        # this excerpt; only comments are changed here.
        if self.content is None or len(self.content) == 0:

        # The mmap region might be in use when we decided to evict
        # it.  This can happen if the cache is too small.
        #
        # If we call close() now, it'll throw an error if
        # something tries to access it.
        #
        # However, we don't need to explicitly call mmap.close()
        #
        # I confirmed in mmapmodule.c that both close
        # and deallocate do the same thing:
        #
        # a) close the file descriptor
        # b) unmap the memory range
        #
        # So we can forget it in the cache and delete the file on
        # disk, and it will tear it down after any other
        # lingering Python references to the mapped memory are
        # gone.
        blockdir = os.path.join(self.cachedir, self.locator[0:3])
        final = os.path.join(blockdir, self.locator) + cacheblock_suffix
        # Drop our shared lock before attempting to upgrade to an
        # exclusive one.
        fcntl.flock(self.filehandle, fcntl.LOCK_UN)

        # Try to get an exclusive lock, this ensures other
        # processes are not using the block.  It is
        # nonblocking and will throw an exception if we
        # can't get it, which is fine because that means
        # we just won't try to delete it.
        #
        # I should note here, the file locking is not
        # strictly necessary, we could just remove it and
        # the kernel would ensure that the underlying
        # inode remains available as long as other
        # processes still have the file open.  However, if
        # you have multiple processes sharing the cache
        # and deleting each other's files, you'll end up
        # with a bunch of ghost files that don't show up
        # in the file system but are still taking up
        # space, which isn't particularly user friendly.
        # The locking strategy ensures that cache blocks
        # in use remain visible.
        fcntl.flock(self.filehandle, fcntl.LOCK_EX | fcntl.LOCK_NB)
        self.filehandle = None
def get_from_disk(locator, cachedir):
    """Map an existing on-disk cache file for `locator` into a DiskCacheSlot.

    NOTE(review): this excerpt omits several lines -- the `try:` that
    the except clauses below belong to, the assignment of the mmap'd
    content onto the slot, and the return statements.  Only
    comments/docstrings are changed here.  Presumably this is meant to
    return the populated slot, or None when the file is absent --
    confirm against the full source.
    """
    blockdir = os.path.join(cachedir, locator[0:3])
    final = os.path.join(blockdir, locator) + cacheblock_suffix

    filehandle = open(final, "rb")

    # Acquire a shared lock, this tells other processes that
    # we're using this block and to please not delete it.
    fcntl.flock(filehandle, fcntl.LOCK_SH)

    content = mmap.mmap(filehandle.fileno(), 0, access=mmap.ACCESS_READ)
    dc = DiskCacheSlot(locator, cachedir)
    dc.filehandle = filehandle
    except FileNotFoundError:
    except Exception as e:
        traceback.print_exc()
def cache_usage(cachedir):
    """Total the on-disk size of cache block files under `cachedir`.

    NOTE(review): fragment -- the accumulator initialization, the inner
    `for name in files:` loop header, the `continue` for non-matching
    names, the size accumulation, and the return statement are missing
    from this excerpt; only comments/docstrings are changed here.
    """
    for root, dirs, files in os.walk(cachedir):
            # Only files carrying the cache suffix count toward usage.
            if not name.endswith(cacheblock_suffix):

            blockpath = os.path.join(root, name)
            res = os.stat(blockpath)
def init_cache(cachedir, maxslots):
    """Verify the disk cache is usable and map existing blocks into slots.

    NOTE(review): fragment -- this definition continues past the end of
    the visible excerpt, and several interior lines (loop headers,
    branch bodies, the `else:` for the eviction path, the return) are
    missing.  Only comments/docstrings are changed here.
    """
    # First check the disk cache works at all by creating a 1 byte cache entry
    # (the locator below looks like md5("a"), i.e. a 1-byte test block --
    # confirm).
    checkexists = DiskCacheSlot.get_from_disk('0cc175b9c0f1b6a831c399e269772661', cachedir)
    ds = DiskCacheSlot('0cc175b9c0f1b6a831c399e269772661', cachedir)
    if checkexists is None:
        # Don't keep the test entry around unless it existed beforehand.

    # map in all the files in the cache directory, up to max slots.
    # after max slots, try to delete the excess blocks.
    #
    # this gives the calling process ownership of all the blocks

    for root, dirs, files in os.walk(cachedir):
            if not name.endswith(cacheblock_suffix):

            blockpath = os.path.join(root, name)
            res = os.stat(blockpath)

            # Locator-named block files (32-hex-char prefix) are cache
            # candidates; stale temp files get cleaned up instead.
            if len(name) == (32+len(cacheblock_suffix)) and not name.startswith("tmp"):
                blocks.append((name[0:32], res.st_atime))
            elif name.startswith("tmp") and ((time.time() - res.st_mtime) > 60):
                # found a temporary file more than 1 minute old,
                # presumably left over from an interrupted write -- delete it.

    # sort by access time (atime), going from most recently
    # accessed (highest timestamp) to least recently accessed
    # (lowest timestamp).
    blocks.sort(key=lambda x: x[1], reverse=True)

    # Map in all the files we found, up to maxslots, if we exceed
    # maxslots, start throwing things out.
    cachelist = collections.OrderedDict()
        got = DiskCacheSlot.get_from_disk(b[0], cachedir)
        if len(cachelist) < maxslots:
            cachelist[got.locator] = got
            # NOTE(review): the `else:` opener for the branch the next
            # comment describes is missing from this excerpt.
            # we found more blocks than maxslots, try to
            # throw it out of the cache.