19192: Add a few bytes to MemorySize to account for data structures.
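
The new inodebytes term in TestFlushFullBlocksOnly below is consistent with MemorySize() charging a flat 64 bytes for every inode and every in-memory segment on top of the buffered file data. A minimal, self-contained sketch of that kind of accounting (the type and function names here are illustrative, not the actual fs_collection.go implementation):

package main

import "fmt"

// memSegmentSketch stands in for an in-memory (unflushed) data segment.
// The name and shape are assumptions, not the real collectionfs types.
type memSegmentSketch struct{ buf []byte }

// inodeOverhead is the flat per-structure charge implied by the test's
// (nDirs*(67*2+1) + 1) * 64 arithmetic.
const inodeOverhead = 64

// fileMemorySize charges inodeOverhead for the file node itself, plus
// inodeOverhead of bookkeeping and the buffered bytes for each segment.
func fileMemorySize(segs []memSegmentSketch) int64 {
	size := int64(inodeOverhead)
	for _, s := range segs {
		size += inodeOverhead + int64(len(s.buf))
	}
	return size
}

func main() {
	// One directory node holding one file with a single 1 MiB segment.
	dirSize := int64(inodeOverhead)
	fileSize := fileMemorySize([]memSegmentSketch{{buf: make([]byte, 1<<20)}})
	fmt.Println(dirSize + fileSize) // 64 + (64 + 64 + 1048576) = 1048768
}

With 67 one-segment, 1 MiB files per directory this works out to (67*2+1)*64 bytes of overhead per directory plus 64 for the root, i.e. the (nDirs*(67*2+1) + 1) * 64 value the test adds to its expected MemorySize().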
[arvados.git] / sdk / go / arvados / fs_collection_test.go
index beb4d61fcf72ef7696952b3bf37179334ff3abd7..c2cac3c6ce2e963b36b7654729e56524ba9bc2db 100644 (file)
@@ -32,6 +32,7 @@ var _ = check.Suite(&CollectionFSSuite{})
 type keepClientStub struct {
        blocks      map[string][]byte
        refreshable map[string]bool
+       reads       []string             // locators from ReadAt() calls
        onWrite     func(bufcopy []byte) // called from WriteBlock, before acquiring lock
        authToken   string               // client's auth token (used for signing locators)
        sigkey      string               // blob signing key
@@ -42,8 +43,14 @@ type keepClientStub struct {
 var errStub404 = errors.New("404 block not found")
 
 func (kcs *keepClientStub) ReadAt(locator string, p []byte, off int) (int, error) {
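+       // Record the locator of every read so tests can inspect which signed
+       // locators were actually used.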
+       kcs.Lock()
+       kcs.reads = append(kcs.reads, locator)
+       kcs.Unlock()
        kcs.RLock()
        defer kcs.RUnlock()
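+       // Reject reads whose locator signature is missing, invalid, or
+       // expired, so stale signatures are detectable in tests.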
+       if err := VerifySignature(locator, kcs.authToken, kcs.sigttl, []byte(kcs.sigkey)); err != nil {
+               return 0, err
+       }
        buf := kcs.blocks[locator[:32]]
        if buf == nil {
                return 0, errStub404
@@ -102,6 +109,7 @@ type CollectionFSSuite struct {
 
 func (s *CollectionFSSuite) SetUpTest(c *check.C) {
        s.client = NewClientFromEnv()
+       s.client.AuthToken = fixtureActiveToken
        err := s.client.RequestAndDecode(&s.coll, "GET", "arvados/v1/collections/"+fixtureFooAndBarFilesInDirUUID, nil, nil)
        c.Assert(err, check.IsNil)
        s.kc = &keepClientStub{
@@ -1213,7 +1221,8 @@ func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
                        c.Assert(err, check.IsNil)
                }
        }
-       c.Check(fs.MemorySize(), check.Equals, int64(nDirs*67<<20))
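+       // Beyond the 67 MiB of file data per directory, MemorySize now
+       // presumably charges 64 bytes apiece for each directory's 67 filenodes
+       // and 67 in-memory segments, plus each dirnode and the root node.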
+       inodebytes := int64((nDirs*(67*2+1) + 1) * 64)
+       c.Check(fs.MemorySize(), check.Equals, int64(nDirs*67<<20)+inodebytes)
        c.Check(flushed, check.Equals, int64(0))
 
        waitForFlush := func(expectUnflushed, expectFlushed int64) {
@@ -1224,27 +1233,27 @@ func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
        }
 
        // Nothing flushed yet
-       waitForFlush((nDirs*67)<<20, 0)
+       waitForFlush((nDirs*67)<<20+inodebytes, 0)
 
        // Flushing a non-empty dir "/" is non-recursive and there are
        // no top-level files, so this has no effect
        fs.Flush("/", false)
-       waitForFlush((nDirs*67)<<20, 0)
+       waitForFlush((nDirs*67)<<20+inodebytes, 0)
 
        // Flush the full block in dir0
        fs.Flush("dir0", false)
-       waitForFlush((nDirs*67-64)<<20, 64<<20)
+       waitForFlush((nDirs*67-64)<<20+inodebytes, 64<<20)
 
        err = fs.Flush("dir-does-not-exist", false)
        c.Check(err, check.NotNil)
 
        // Flush full blocks in all dirs
        fs.Flush("", false)
-       waitForFlush(nDirs*3<<20, nDirs*64<<20)
+       waitForFlush(nDirs*3<<20+inodebytes, nDirs*64<<20)
 
        // Flush non-full blocks, too
        fs.Flush("", true)
-       waitForFlush(0, nDirs*67<<20)
+       waitForFlush(inodebytes, nDirs*67<<20)
 }
 
 // Even when writing lots of files/dirs from different goroutines, as
@@ -1433,6 +1442,127 @@ func (s *CollectionFSSuite) TestEdgeCaseManifests(c *check.C) {
        }
 }
 
+func (s *CollectionFSSuite) TestSnapshotSplice(c *check.C) {
+       filedata1 := "hello snapshot+splice world\n"
+       fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+       {
+               f, err := fs.OpenFile("file1", os.O_CREATE|os.O_RDWR, 0700)
+               c.Assert(err, check.IsNil)
+               _, err = f.Write([]byte(filedata1))
+               c.Assert(err, check.IsNil)
+               err = f.Close()
+               c.Assert(err, check.IsNil)
+       }
+
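+       // Snapshot the collection root, splice the copy in at dir1, and
+       // confirm the file is readable at its new path.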
+       snap, err := Snapshot(fs, "/")
+       c.Assert(err, check.IsNil)
+       err = Splice(fs, "dir1", snap)
+       c.Assert(err, check.IsNil)
+       f, err := fs.Open("dir1/file1")
+       c.Assert(err, check.IsNil)
+       buf, err := io.ReadAll(f)
+       c.Assert(err, check.IsNil)
+       c.Check(string(buf), check.Equals, filedata1)
+}
+
+func (s *CollectionFSSuite) TestRefreshSignatures(c *check.C) {
+       filedata1 := "hello refresh signatures world\n"
+       fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+       fs.Mkdir("d1", 0700)
+       f, err := fs.OpenFile("d1/file1", os.O_CREATE|os.O_RDWR, 0700)
+       c.Assert(err, check.IsNil)
+       _, err = f.Write([]byte(filedata1))
+       c.Assert(err, check.IsNil)
+       err = f.Close()
+       c.Assert(err, check.IsNil)
+
+       filedata2 := "hello refresh signatures universe\n"
+       fs.Mkdir("d2", 0700)
+       f, err = fs.OpenFile("d2/file2", os.O_CREATE|os.O_RDWR, 0700)
+       c.Assert(err, check.IsNil)
+       _, err = f.Write([]byte(filedata2))
+       c.Assert(err, check.IsNil)
+       err = f.Close()
+       c.Assert(err, check.IsNil)
+       txt, err := fs.MarshalManifest(".")
+       c.Assert(err, check.IsNil)
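+       // Save the collection so there is stored manifest text to re-sign
+       // with different expiry times below.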
+       var saved Collection
+       err = s.client.RequestAndDecode(&saved, "POST", "arvados/v1/collections", nil, map[string]interface{}{
+               "select": []string{"manifest_text", "uuid", "portable_data_hash"},
+               "collection": map[string]interface{}{
+                       "manifest_text": txt,
+               },
+       })
+       c.Assert(err, check.IsNil)
+
+       // Update signatures synchronously if they are already expired
+       // when Read() is called.
+       {
+               saved.ManifestText = SignManifest(saved.ManifestText, s.kc.authToken, time.Now().Add(-2*time.Second), s.kc.sigttl, []byte(s.kc.sigkey))
+               fs, err := saved.FileSystem(s.client, s.kc)
+               c.Assert(err, check.IsNil)
+               f, err := fs.OpenFile("d1/file1", os.O_RDONLY, 0)
+               c.Assert(err, check.IsNil)
+               buf, err := ioutil.ReadAll(f)
+               c.Check(err, check.IsNil)
+               c.Check(string(buf), check.Equals, filedata1)
+       }
+
+       // Update signatures asynchronously if we're more than halfway
+       // to the TTL when Read() is called.
+       {
+               exp := time.Now().Add(2 * time.Minute)
+               saved.ManifestText = SignManifest(saved.ManifestText, s.kc.authToken, exp, s.kc.sigttl, []byte(s.kc.sigkey))
+               fs, err := saved.FileSystem(s.client, s.kc)
+               c.Assert(err, check.IsNil)
+               f1, err := fs.OpenFile("d1/file1", os.O_RDONLY, 0)
+               c.Assert(err, check.IsNil)
+               f2, err := fs.OpenFile("d2/file2", os.O_RDONLY, 0)
+               c.Assert(err, check.IsNil)
+               buf, err := ioutil.ReadAll(f1)
+               c.Check(err, check.IsNil)
+               c.Check(string(buf), check.Equals, filedata1)
+
+               // Ensure fs treats the 2-minute TTL as less than half
+               // the server's signing TTL. If we don't do this,
+               // collectionfs will guess the signing TTL is 2 minutes,
+               // treat the signature as fresh, and skip the async
+               // refresh.
+               fs.(*collectionFileSystem).guessSignatureTTL = time.Hour
+
+               refreshed := false
+               for deadline := time.Now().Add(time.Second * 10); time.Now().Before(deadline) && !refreshed; time.Sleep(time.Second / 10) {
+                       _, err = f1.Seek(0, io.SeekStart)
+                       c.Assert(err, check.IsNil)
+                       buf, err = ioutil.ReadAll(f1)
+                       c.Assert(err, check.IsNil)
+                       c.Assert(string(buf), check.Equals, filedata1)
+                       loc := s.kc.reads[len(s.kc.reads)-1]
+                       t, err := signatureExpiryTime(loc)
+                       c.Assert(err, check.IsNil)
+                       c.Logf("last read block %s had signature expiry time %v", loc, t)
+                       if t.Sub(time.Now()) > time.Hour {
+                               refreshed = true
+                       }
+               }
+               c.Check(refreshed, check.Equals, true)
+
+               // Second locator should have been updated at the same
+               // time.
+               buf, err = ioutil.ReadAll(f2)
+               c.Assert(err, check.IsNil)
+               c.Assert(string(buf), check.Equals, filedata2)
+               loc := s.kc.reads[len(s.kc.reads)-1]
+               c.Check(loc, check.Not(check.Equals), s.kc.reads[len(s.kc.reads)-2])
+               t, err := signatureExpiryTime(s.kc.reads[len(s.kc.reads)-1])
+               c.Assert(err, check.IsNil)
+               c.Logf("last read block %s had signature expiry time %v", loc, t)
+               c.Check(t.Sub(time.Now()) > time.Hour, check.Equals, true)
+       }
+}
+
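
The synchronous/asynchronous behavior exercised by TestRefreshSignatures above boils down to a threshold decision on the remaining signature lifetime: refresh before serving the read if the signatures have already expired, and kick off a background refresh once less than half the signing TTL remains. A minimal sketch under that assumption (the helper name and types are illustrative, not the collectionfs API):

package main

import (
	"fmt"
	"time"
)

// refreshAction is illustrative only; the real decision is made inside
// collectionfs and is observed indirectly by TestRefreshSignatures.
type refreshAction int

const (
	noRefresh    refreshAction = iota
	asyncRefresh // still valid, but past the halfway point
	syncRefresh  // already expired: refresh before serving the read
)

// chooseRefresh decides how to refresh based on the earliest signature
// expiry and the (possibly guessed) signing TTL.
func chooseRefresh(expiry time.Time, signingTTL time.Duration) refreshAction {
	now := time.Now()
	switch {
	case !expiry.After(now):
		return syncRefresh
	case expiry.Sub(now) < signingTTL/2:
		return asyncRefresh
	default:
		return noRefresh
	}
}

func main() {
	ttl := time.Hour // mirrors the guessSignatureTTL override in the test above
	fmt.Println(chooseRefresh(time.Now().Add(-2*time.Second), ttl)) // 2: expired, sync refresh
	fmt.Println(chooseRefresh(time.Now().Add(2*time.Minute), ttl))  // 1: 2m left of 1h, async refresh
	fmt.Println(chooseRefresh(time.Now().Add(50*time.Minute), ttl)) // 0: still fresh
}
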
 var bigmanifest = func() string {
        var buf bytes.Buffer
        for i := 0; i < 2000; i++ {