return fn.fs
}
- size += 64
- if seg, ok := seg.(*memSegment); ok {
- size += int64(seg.Len())
- }
+func (fn *filenode) MemorySize() (size int64) {
+ fn.RLock()
+ defer fn.RUnlock()
+ size = 64
+ for _, seg := range fn.segments {
++ size += seg.memorySize()
+ }
+ return
+}
+
// Read reads file data from a single segment, starting at startPtr,
// into p. startPtr is assumed not to be up-to-date. Caller must have
// RLock or Lock.
// Return a new segment with a subsection of the data from this
// one. length<0 means length=Len()-off.
Slice(off int, length int) segment
++ memorySize() int64
}
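The length<0 convention in the Slice comment is easy to misread, so here is a toy, self-contained illustration of just that convention (not the real segment implementation):

// Toy illustration of the documented Slice convention: length < 0
// means "everything from off to the end".
package main

import "fmt"

func sliceBytes(buf []byte, off, length int) []byte {
	if length < 0 {
		length = len(buf) - off
	}
	return buf[off : off+length]
}

func main() {
	data := []byte("hello world")
	fmt.Println(string(sliceBytes(data, 6, -1))) // "world"
	fmt.Println(string(sliceBytes(data, 0, 5)))  // "hello"
}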
type memSegment struct {
return
}
++func (me *memSegment) memorySize() int64 {
++ return 64 + int64(len(me.buf))
++}
++
type storedSegment struct {
kc fsBackend
locator string
return se.kc.ReadAt(se.locator, p, int(off)+se.offset)
}
++func (se storedSegment) memorySize() int64 {
++ return 64 + int64(len(se.locator))
++}
++
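For anyone auditing the test expectations below, the new accounting charges each segment a flat 64 bytes of assumed struct overhead plus whatever it actually keeps in memory: the data buffer for an unflushed memSegment, or just the locator string for a flushed storedSegment. A sketch with example sizes (not part of the patch):

// Example per-segment charges under the accounting added above.
package main

import "fmt"

func main() {
	// Unflushed 1 MiB memSegment: 64 + len(buf).
	fmt.Println(64 + 1<<20) // 1048640

	// Flushed 64 MiB block referenced by a signed locator such as
	// "<32-hex md5>+67108864+A<40 hex>@<8 hex>" (32+9+51 = 92 chars):
	// 64 + len(locator).
	fmt.Println(64 + 92) // 156
}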
func canonicalName(name string) string {
name = path.Clean("/" + name)
if name == "/" || name == "./" {
}
nDirs := int64(8)
++ nFiles := int64(67)
megabyte := make([]byte, 1<<20)
for i := int64(0); i < nDirs; i++ {
dir := fmt.Sprintf("dir%d", i)
fs.Mkdir(dir, 0755)
-- for j := 0; j < 67; j++ {
++ for j := int64(0); j < nFiles; j++ {
f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, j), os.O_WRONLY|os.O_CREATE, 0)
c.Assert(err, check.IsNil)
defer f.Close()
_, err = f.Write(megabyte)
c.Assert(err, check.IsNil)
}
}
- inodebytes := int64((nDirs*(67*2+1) + 1) * 64)
- c.Check(fs.MemorySize(), check.Equals, int64(nDirs*67<<20)+inodebytes)
- inodebytes := int64((nDirs*(67+1) + 1) * 64)
- c.Check(fs.MemorySize(), check.Equals, int64(nDirs*67*(1<<20+8))+inodebytes)
++ inodebytes := int64((nDirs*(nFiles+1) + 1) * 64)
++ c.Check(fs.MemorySize(), check.Equals, nDirs*nFiles*(1<<20+64)+inodebytes)
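The expected value can be cross-checked with a quick standalone calculation (same constants as the test; nothing here is part of the patch):

// Worked example of the MemorySize expectation: 8 dirs x 67 files,
// each file holding one unflushed 1 MiB memSegment.
package main

import "fmt"

func main() {
	nDirs, nFiles := int64(8), int64(67)

	// 64 bytes per inode: every file, every dir, plus the root.
	inodebytes := (nDirs*(nFiles+1) + 1) * 64 // 545 * 64 = 34880

	// 1<<20 + 64 bytes per unflushed 1 MiB segment.
	segbytes := nDirs * nFiles * (1<<20 + 64) // 536 * 1048640 = 562071040

	fmt.Println(segbytes + inodebytes) // 562105920
}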
c.Check(flushed, check.Equals, int64(0))
waitForFlush := func(expectUnflushed, expectFlushed int64) {
}
// Nothing flushed yet
- waitForFlush((nDirs*67)<<20+inodebytes, 0)
- waitForFlush(nDirs*67*(1<<20+8)+inodebytes, 0)
++ waitForFlush(nDirs*nFiles*(1<<20+64)+inodebytes, 0)
// Flushing a non-empty dir "/" is non-recursive and there are
// no top-level files, so this has no effect
fs.Flush("/", false)
- waitForFlush((nDirs*67)<<20+inodebytes, 0)
- waitForFlush(nDirs*67*(1<<20+8)+inodebytes, 0)
++ waitForFlush(nDirs*nFiles*(1<<20+64)+inodebytes, 0)
// Flush the full block in dir0
fs.Flush("dir0", false)
- waitForFlush((nDirs*67-64)<<20+inodebytes, 64<<20)
- bigloclen := int64(32 + 9 + 51 + 40) // md5 + "+" + "67xxxxxx" + "+Axxxxxx..." + 40 (see (*dirnode)MemorySize)
- waitForFlush((nDirs*67-64)*(1<<20+8)+inodebytes+bigloclen*64, 64<<20)
++ bigloclen := int64(32 + 9 + 51 + 64) // md5 + "+" + "67xxxxxx" + "+Axxxxxx..." + 64 (see (storedSegment)memorySize)
++ waitForFlush((nDirs*nFiles-64)*(1<<20+64)+inodebytes+bigloclen*64, 64<<20)
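As a cross-check (assuming the standard signed md5 locator layout of 32+9+51 characters): flushing dir0 replaces its 64 full 1 MiB memSegments with 64 storedSegments charged 92+64 = 156 bytes each, so the expected unflushed total is (8*67-64)*1048640 + 34880 + 64*156 = 495,002,944 bytes, against 64 MiB (67,108,864 bytes) flushed.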
err = fs.Flush("dir-does-not-exist", false)
c.Check(err, check.NotNil)
// Flush full blocks in all dirs
fs.Flush("", false)
- waitForFlush(nDirs*3<<20+inodebytes, nDirs*64<<20)
- waitForFlush(nDirs*3*(1<<20+8)+inodebytes+bigloclen*64*nDirs, nDirs*64<<20)
++ waitForFlush(nDirs*3*(1<<20+64)+inodebytes+bigloclen*64*nDirs, nDirs*64<<20)
// Flush non-full blocks, too
fs.Flush("", true)
- waitForFlush(inodebytes, nDirs*67<<20)
- smallloclen := int64(32 + 8 + 51 + 40) // md5 + "+" + "3xxxxxx" + "+Axxxxxx..." + 40 (see (*dirnode)MemorySize)
++ smallloclen := int64(32 + 8 + 51 + 64) // md5 + "+" + "3xxxxxx" + "+Axxxxxx..." + 64 (see (storedSegment)memorySize)
+ waitForFlush(inodebytes+bigloclen*64*nDirs+smallloclen*3*nDirs, nDirs*67<<20)
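Likewise for the end state (worked out here only as a cross-check): each dir's 67 MiB end up in one 64 MiB block plus one 3 MiB block, so memory holds only 545*64 = 34,880 inode bytes, 8*64*156 = 79,872 bytes of big-block locators, and 8*3*155 = 3,720 bytes of small-block locators ("+3145728" is 8 characters), for 118,472 unflushed bytes, while all 8*67 = 536 MiB (562,036,736 bytes) have been flushed.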
}
// Even when writing lots of files/dirs from different goroutines, as
}
// Remove tokens until reaching size limit, starting with the
// least frequently used entries (which Keys() returns last).
- for i := len(keys) - 1; i >= 0 && size > c.cluster.Collections.WebDAVCache.MaxCollectionBytes/2; i-- {
- for i := len(keys) - 1; i >= 0; i-- {
- token := keys[i]
- if size <= c.cluster.Collections.WebDAVCache.MaxCollectionBytes {
- break
- }
- ent, ok := c.sessions.Peek(token)
- if !ok {
- continue
- }
- s := ent.(*cachedSession)
- fs, _ := s.fs.Load().(arvados.CustomFileSystem)
- if fs == nil {
- continue
++ for i := len(keys) - 1; i >= 0 && size > c.cluster.Collections.WebDAVCache.MaxCollectionBytes; i-- {
+ if sizes[i] > 0 {
+ c.sessions.Remove(keys[i])
+ size -= sizes[i]
}
- c.sessions.Remove(token)
- size -= fs.MemorySize()
}
}
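The replacement pruning loop is easier to follow with a toy version (made-up tokens and sizes; not the keep-web code): Keys() returns the least frequently used entries last, so the loop walks from the end and evicts sessions until the total fits under the limit, skipping entries whose filesystem size is not yet known.

// Toy version of the pruning pattern used above.
package main

import "fmt"

func main() {
	keys := []string{"tokenA", "tokenB", "tokenC"} // least used entries last
	sizes := []int64{100, 0, 300}                  // bytes attributed to each session
	size := int64(400)                             // current total
	limit := int64(150)                            // stands in for MaxCollectionBytes

	var evicted []string
	for i := len(keys) - 1; i >= 0 && size > limit; i-- {
		if sizes[i] > 0 { // unknown size (0): skip rather than evict
			evicted = append(evicted, keys[i])
			size -= sizes[i]
		}
	}
	fmt.Println(evicted, size) // [tokenC] 100
}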
c.Check(summaries["request_duration_seconds/get/200"].SampleCount, check.Equals, "3")
c.Check(summaries["request_duration_seconds/get/404"].SampleCount, check.Equals, "1")
c.Check(summaries["time_to_status_seconds/get/404"].SampleCount, check.Equals, "1")
- c.Check(counters["arvados_keepweb_collectioncache_requests//"].Value, check.Equals, int64(2))
- c.Check(counters["arvados_keepweb_collectioncache_api_calls//"].Value, check.Equals, int64(2))
- c.Check(counters["arvados_keepweb_collectioncache_hits//"].Value, check.Equals, int64(1))
- c.Check(counters["arvados_keepweb_collectioncache_pdh_hits//"].Value, check.Equals, int64(1))
- c.Check(gauges["arvados_keepweb_collectioncache_cached_manifests//"].Value, check.Equals, float64(1))
- // FooCollection's cached manifest size is 45 ("1f4b0....+45")
- // plus one 51-byte blob signature; session fs counts 3 inodes
- // * 64 bytes.
- c.Check(gauges["arvados_keepweb_sessions_cached_collection_bytes//"].Value, check.Equals, float64(45+51+64*3))
- c.Check(gauges["arvados_keepweb_sessions_cached_session_bytes//"].Value, check.Equals, float64(445))
++ c.Check(gauges["arvados_keepweb_sessions_cached_session_bytes//"].Value, check.Equals, float64(384))
// If the Host header indicates a collection, /metrics.json
// refers to a file in the collection -- the metrics handler