19192: Add a few bytes to MemorySize to account for data structures.
author    Tom Clegg <tom@curii.com>
          Tue, 21 Jun 2022 03:16:30 +0000 (23:16 -0400)
committer Tom Clegg <tom@curii.com>
          Tue, 21 Jun 2022 03:16:30 +0000 (23:16 -0400)
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@curii.com>

sdk/go/arvados/fs_base.go
sdk/go/arvados/fs_collection.go
sdk/go/arvados/fs_collection_test.go
services/keep-web/server_test.go

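In short, each in-memory bookkeeping structure (treenode/dirnode, filenode, and each file segment) now contributes a flat 64 bytes to MemorySize(), on top of any data buffered in memSegments. Below is a minimal, self-contained sketch of that accounting idea; the type and constant names are illustrative only, not the actual Arvados SDK types:

package main

import "fmt"

// inodeOverhead models the flat 64 bytes this commit charges per
// in-memory structure (directory node, file node, or file segment).
const inodeOverhead = 64

type node interface {
	MemorySize() int64
}

type dir struct {
	children []node
}

func (d *dir) MemorySize() (size int64) {
	for _, c := range d.children {
		size += c.MemorySize()
	}
	return inodeOverhead + size
}

type file struct {
	segments []int64 // bytes buffered in each in-memory segment
}

func (f *file) MemorySize() (size int64) {
	for _, segLen := range f.segments {
		size += inodeOverhead + segLen
	}
	return inodeOverhead + size
}

func main() {
	root := &dir{children: []node{
		&file{segments: []int64{1 << 20}}, // one 1 MiB segment
	}}
	// 64 (dir) + 64 (file) + 64 + 1<<20 (segment) = 1048768
	fmt.Println(root.MemorySize())
}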
index bebb74261e4767dd917a919f170fd47312c7939d..ce9253ab3d4f5d5447273cfe02edca716afc52fd 100644 (file)
@@ -415,7 +415,7 @@ func (n *treenode) MemorySize() (size int64) {
        for _, inode := range n.inodes {
                size += inode.MemorySize()
        }
-       return
+       return 64 + size
 }
 
 type fileSystem struct {
index f4dae746e2a72a1a6384857749cf7261f5465917..ccfbdc4da262c13ee3d319ad072f73a10b9b1d0a 100644 (file)
@@ -1159,15 +1159,17 @@ func (dn *dirnode) MemorySize() (size int64) {
                case *dirnode:
                        size += node.MemorySize()
                case *filenode:
+                       size += 64
                        for _, seg := range node.segments {
                                switch seg := seg.(type) {
                                case *memSegment:
                                        size += int64(seg.Len())
                                }
+                               size += 64
                        }
                }
        }
-       return
+       return 64 + size
 }
 
 // caller must have write lock.
index b221aaa083a12fd1f13fd3eaec3d9c0f85ae61b5..c2cac3c6ce2e963b36b7654729e56524ba9bc2db 100644 (file)
@@ -1221,7 +1221,8 @@ func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
                        c.Assert(err, check.IsNil)
                }
        }
-       c.Check(fs.MemorySize(), check.Equals, int64(nDirs*67<<20))
+       inodebytes := int64((nDirs*(67*2+1) + 1) * 64)
+       c.Check(fs.MemorySize(), check.Equals, int64(nDirs*67<<20)+inodebytes)
        c.Check(flushed, check.Equals, int64(0))
 
        waitForFlush := func(expectUnflushed, expectFlushed int64) {
@@ -1232,27 +1233,27 @@ func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
        }
 
        // Nothing flushed yet
-       waitForFlush((nDirs*67)<<20, 0)
+       waitForFlush((nDirs*67)<<20+inodebytes, 0)
 
        // Flushing a non-empty dir "/" is non-recursive and there are
        // no top-level files, so this has no effect
        fs.Flush("/", false)
-       waitForFlush((nDirs*67)<<20, 0)
+       waitForFlush((nDirs*67)<<20+inodebytes, 0)
 
        // Flush the full block in dir0
        fs.Flush("dir0", false)
-       waitForFlush((nDirs*67-64)<<20, 64<<20)
+       waitForFlush((nDirs*67-64)<<20+inodebytes, 64<<20)
 
        err = fs.Flush("dir-does-not-exist", false)
        c.Check(err, check.NotNil)
 
        // Flush full blocks in all dirs
        fs.Flush("", false)
-       waitForFlush(nDirs*3<<20, nDirs*64<<20)
+       waitForFlush(nDirs*3<<20+inodebytes, nDirs*64<<20)
 
        // Flush non-full blocks, too
        fs.Flush("", true)
-       waitForFlush(0, nDirs*67<<20)
+       waitForFlush(inodebytes, nDirs*67<<20)
 }
 
 // Even when writing lots of files/dirs from different goroutines, as
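A rough reading of the new inodebytes term, assuming the test's layout of 67 one-MiB files per directory (each file holding a single memSegment): each directory then accounts for 67 filenodes plus 67 segments plus its own dirnode (67*2+1 structures), and the root directory adds one more, each charged 64 bytes. A quick check of the arithmetic:

package main

import "fmt"

func main() {
	nDirs := int64(8) // illustrative value; the test's actual nDirs may differ
	inodebytes := (nDirs*(67*2+1) + 1) * 64
	fmt.Println(inodebytes) // 69184 when nDirs == 8
}

This overhead is also why the final expectation is waitForFlush(inodebytes, ...) rather than zero: flushing moves segment data out of memory, but the 64-byte bookkeeping entries are still counted by MemorySize().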
index dd8ce06172a01b3b90774f7985d8ab686f450af8..61c540808b640d6115a76e3efb70e10928b2dba3 100644 (file)
@@ -480,8 +480,10 @@ func (s *IntegrationSuite) TestMetrics(c *check.C) {
        c.Check(counters["arvados_keepweb_collectioncache_hits//"].Value, check.Equals, int64(1))
        c.Check(counters["arvados_keepweb_collectioncache_pdh_hits//"].Value, check.Equals, int64(1))
        c.Check(gauges["arvados_keepweb_collectioncache_cached_manifests//"].Value, check.Equals, float64(1))
-       // FooCollection's cached manifest size is 45 ("1f4b0....+45") plus one 51-byte blob signature
-       c.Check(gauges["arvados_keepweb_sessions_cached_collection_bytes//"].Value, check.Equals, float64(45+51))
+       // FooCollection's cached manifest size is 45 ("1f4b0....+45")
+       // plus one 51-byte blob signature; session fs counts 3 inodes
+       // * 64 bytes.
+       c.Check(gauges["arvados_keepweb_sessions_cached_collection_bytes//"].Value, check.Equals, float64(45+51+64*3))
 
        // If the Host header indicates a collection, /metrics.json
        // refers to a file in the collection -- the metrics handler
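For this gauge the updated expectation works out to 45 + 51 + 3*64 = 288 bytes: the manifest text, one blob signature, and three 64-byte structures in the session's collection filesystem. Exactly which three structures those are (for example, the collection's root node, the single filenode, and its one segment) is an assumption here; the diff only states "3 inodes * 64 bytes".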