X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/c8bf4dc1eef2a73b4b01501eabdc922e2a27a276..35db495717a628e0a6ef52a453b8d8ced793c41b:/sdk/go/arvados/fs_collection_test.go

diff --git a/sdk/go/arvados/fs_collection_test.go b/sdk/go/arvados/fs_collection_test.go
index beb4d61fcf..73689e4ead 100644
--- a/sdk/go/arvados/fs_collection_test.go
+++ b/sdk/go/arvados/fs_collection_test.go
@@ -32,6 +32,7 @@ var _ = check.Suite(&CollectionFSSuite{})
 type keepClientStub struct {
 	blocks      map[string][]byte
 	refreshable map[string]bool
+	reads       []string             // locators from ReadAt() calls
 	onWrite     func(bufcopy []byte) // called from WriteBlock, before acquiring lock
 	authToken   string               // client's auth token (used for signing locators)
 	sigkey      string               // blob signing key
@@ -42,8 +43,14 @@ type keepClientStub struct {
 var errStub404 = errors.New("404 block not found")
 
 func (kcs *keepClientStub) ReadAt(locator string, p []byte, off int) (int, error) {
+	kcs.Lock()
+	kcs.reads = append(kcs.reads, locator)
+	kcs.Unlock()
 	kcs.RLock()
 	defer kcs.RUnlock()
+	if err := VerifySignature(locator, kcs.authToken, kcs.sigttl, []byte(kcs.sigkey)); err != nil {
+		return 0, err
+	}
 	buf := kcs.blocks[locator[:32]]
 	if buf == nil {
 		return 0, errStub404
@@ -102,6 +109,7 @@ type CollectionFSSuite struct {
 
 func (s *CollectionFSSuite) SetUpTest(c *check.C) {
 	s.client = NewClientFromEnv()
+	s.client.AuthToken = fixtureActiveToken
 	err := s.client.RequestAndDecode(&s.coll, "GET", "arvados/v1/collections/"+fixtureFooAndBarFilesInDirUUID, nil, nil)
 	c.Assert(err, check.IsNil)
 	s.kc = &keepClientStub{
@@ -1201,11 +1209,12 @@ func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
 	}
 
 	nDirs := int64(8)
+	nFiles := int64(67)
 	megabyte := make([]byte, 1<<20)
 	for i := int64(0); i < nDirs; i++ {
 		dir := fmt.Sprintf("dir%d", i)
 		fs.Mkdir(dir, 0755)
-		for j := 0; j < 67; j++ {
+		for j := int64(0); j < nFiles; j++ {
 			f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, j), os.O_WRONLY|os.O_CREATE, 0)
 			c.Assert(err, check.IsNil)
 			defer f.Close()
@@ -1213,7 +1222,8 @@ func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
 			c.Assert(err, check.IsNil)
 		}
 	}
-	c.Check(fs.MemorySize(), check.Equals, int64(nDirs*67<<20))
+	inodebytes := int64((nDirs*(nFiles+1) + 1) * 64)
+	c.Check(fs.MemorySize(), check.Equals, nDirs*nFiles*(1<<20+64)+inodebytes)
 	c.Check(flushed, check.Equals, int64(0))
 
 	waitForFlush := func(expectUnflushed, expectFlushed int64) {
@@ -1224,27 +1234,29 @@ func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
 	}
 
 	// Nothing flushed yet
-	waitForFlush((nDirs*67)<<20, 0)
+	waitForFlush(nDirs*nFiles*(1<<20+64)+inodebytes, 0)
 
 	// Flushing a non-empty dir "/" is non-recursive and there are
 	// no top-level files, so this has no effect
 	fs.Flush("/", false)
-	waitForFlush((nDirs*67)<<20, 0)
+	waitForFlush(nDirs*nFiles*(1<<20+64)+inodebytes, 0)
 
 	// Flush the full block in dir0
 	fs.Flush("dir0", false)
-	waitForFlush((nDirs*67-64)<<20, 64<<20)
+	bigloclen := int64(32 + 9 + 51 + 64) // md5 + "+" + "67xxxxxx" + "+Axxxxxx..." + 64 (see (storedSegment)memorySize)
+	waitForFlush((nDirs*nFiles-64)*(1<<20+64)+inodebytes+bigloclen*64, 64<<20)
 
 	err = fs.Flush("dir-does-not-exist", false)
 	c.Check(err, check.NotNil)
 
 	// Flush full blocks in all dirs
 	fs.Flush("", false)
-	waitForFlush(nDirs*3<<20, nDirs*64<<20)
+	waitForFlush(nDirs*3*(1<<20+64)+inodebytes+bigloclen*64*nDirs, nDirs*64<<20)
 
 	// Flush non-full blocks, too
 	fs.Flush("", true)
-	waitForFlush(0, nDirs*67<<20)
+	smallloclen := int64(32 + 8 + 51 + 64) // md5 + "+" + "3xxxxxx" + "+Axxxxxx..." + 64 (see (storedSegment)memorySize)
+	waitForFlush(inodebytes+bigloclen*64*nDirs+smallloclen*3*nDirs, nDirs*67<<20)
 }
 
 // Even when writing lots of files/dirs from different goroutines, as
@@ -1433,6 +1445,127 @@ func (s *CollectionFSSuite) TestEdgeCaseManifests(c *check.C) {
 	}
 }
 
+func (s *CollectionFSSuite) TestSnapshotSplice(c *check.C) {
+	filedata1 := "hello snapshot+splice world\n"
+	fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+	c.Assert(err, check.IsNil)
+	{
+		f, err := fs.OpenFile("file1", os.O_CREATE|os.O_RDWR, 0700)
+		c.Assert(err, check.IsNil)
+		_, err = f.Write([]byte(filedata1))
+		c.Assert(err, check.IsNil)
+		err = f.Close()
+		c.Assert(err, check.IsNil)
+	}
+
+	snap, err := Snapshot(fs, "/")
+	c.Assert(err, check.IsNil)
+	err = Splice(fs, "dir1", snap)
+	c.Assert(err, check.IsNil)
+	f, err := fs.Open("dir1/file1")
+	c.Assert(err, check.IsNil)
+	buf, err := io.ReadAll(f)
+	c.Assert(err, check.IsNil)
+	c.Check(string(buf), check.Equals, filedata1)
+}
+
+func (s *CollectionFSSuite) TestRefreshSignatures(c *check.C) {
+	filedata1 := "hello refresh signatures world\n"
+	fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+	c.Assert(err, check.IsNil)
+	fs.Mkdir("d1", 0700)
+	f, err := fs.OpenFile("d1/file1", os.O_CREATE|os.O_RDWR, 0700)
+	c.Assert(err, check.IsNil)
+	_, err = f.Write([]byte(filedata1))
+	c.Assert(err, check.IsNil)
+	err = f.Close()
+	c.Assert(err, check.IsNil)
+
+	filedata2 := "hello refresh signatures universe\n"
+	fs.Mkdir("d2", 0700)
+	f, err = fs.OpenFile("d2/file2", os.O_CREATE|os.O_RDWR, 0700)
+	c.Assert(err, check.IsNil)
+	_, err = f.Write([]byte(filedata2))
+	c.Assert(err, check.IsNil)
+	err = f.Close()
+	c.Assert(err, check.IsNil)
+	txt, err := fs.MarshalManifest(".")
+	c.Assert(err, check.IsNil)
+	var saved Collection
+	err = s.client.RequestAndDecode(&saved, "POST", "arvados/v1/collections", nil, map[string]interface{}{
+		"select": []string{"manifest_text", "uuid", "portable_data_hash"},
+		"collection": map[string]interface{}{
+			"manifest_text": txt,
+		},
+	})
+	c.Assert(err, check.IsNil)
+
+	// Update signatures synchronously if they are already expired
+	// when Read() is called.
+	{
+		saved.ManifestText = SignManifest(saved.ManifestText, s.kc.authToken, time.Now().Add(-2*time.Second), s.kc.sigttl, []byte(s.kc.sigkey))
+		fs, err := saved.FileSystem(s.client, s.kc)
+		c.Assert(err, check.IsNil)
+		f, err := fs.OpenFile("d1/file1", os.O_RDONLY, 0)
+		c.Assert(err, check.IsNil)
+		buf, err := ioutil.ReadAll(f)
+		c.Check(err, check.IsNil)
+		c.Check(string(buf), check.Equals, filedata1)
+	}
+
+	// Update signatures asynchronously if we're more than half
+	// way to TTL when Read() is called.
+	{
+		exp := time.Now().Add(2 * time.Minute)
+		saved.ManifestText = SignManifest(saved.ManifestText, s.kc.authToken, exp, s.kc.sigttl, []byte(s.kc.sigkey))
+		fs, err := saved.FileSystem(s.client, s.kc)
+		c.Assert(err, check.IsNil)
+		f1, err := fs.OpenFile("d1/file1", os.O_RDONLY, 0)
+		c.Assert(err, check.IsNil)
+		f2, err := fs.OpenFile("d2/file2", os.O_RDONLY, 0)
+		c.Assert(err, check.IsNil)
+		buf, err := ioutil.ReadAll(f1)
+		c.Check(err, check.IsNil)
+		c.Check(string(buf), check.Equals, filedata1)
+
+		// Ensure fs treats the 2-minute TTL as less than half
+		// the server's signing TTL. If we don't do this,
+		// collectionfs will guess the signature is fresh,
+		// i.e., signing TTL is 2 minutes, and won't do an
+		// async refresh.
+		fs.(*collectionFileSystem).guessSignatureTTL = time.Hour
+
+		refreshed := false
+		for deadline := time.Now().Add(time.Second * 10); time.Now().Before(deadline) && !refreshed; time.Sleep(time.Second / 10) {
+			_, err = f1.Seek(0, io.SeekStart)
+			c.Assert(err, check.IsNil)
+			buf, err = ioutil.ReadAll(f1)
+			c.Assert(err, check.IsNil)
+			c.Assert(string(buf), check.Equals, filedata1)
+			loc := s.kc.reads[len(s.kc.reads)-1]
+			t, err := signatureExpiryTime(loc)
+			c.Assert(err, check.IsNil)
+			c.Logf("last read block %s had signature expiry time %v", loc, t)
+			if t.Sub(time.Now()) > time.Hour {
+				refreshed = true
+			}
+		}
+		c.Check(refreshed, check.Equals, true)
+
+		// Second locator should have been updated at the same
+		// time.
+		buf, err = ioutil.ReadAll(f2)
+		c.Assert(err, check.IsNil)
+		c.Assert(string(buf), check.Equals, filedata2)
+		loc := s.kc.reads[len(s.kc.reads)-1]
+		c.Check(loc, check.Not(check.Equals), s.kc.reads[len(s.kc.reads)-2])
+		t, err := signatureExpiryTime(s.kc.reads[len(s.kc.reads)-1])
+		c.Assert(err, check.IsNil)
+		c.Logf("last read block %s had signature expiry time %v", loc, t)
+		c.Check(t.Sub(time.Now()) > time.Hour, check.Equals, true)
+	}
+}
+
 var bigmanifest = func() string {
 	var buf bytes.Buffer
 	for i := 0; i < 2000; i++ {
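The expected sizes asserted in TestFlushFullBlocksOnly follow the accounting scheme spelled out in the test's own comments: 64 bytes of bookkeeping per inode, 1 MiB plus 64 bytes per unflushed segment, and locator-string length plus 64 bytes per stored segment. The standalone sketch below reproduces that arithmetic; the package layout and variable names are illustrative only and are not part of the test file.

package main

import "fmt"

func main() {
	nDirs, nFiles := int64(8), int64(67)

	// 64 bytes of bookkeeping per inode: every file, every directory, plus the root.
	inodebytes := (nDirs*(nFiles+1) + 1) * 64

	// Before any flush: each 1 MiB file segment is held in memory plus 64 bytes of overhead.
	beforeFlush := nDirs*nFiles*(1<<20+64) + inodebytes

	// A stored (flushed) segment costs only its locator string plus 64 bytes:
	// md5 (32) + "+67xxxxxx" (9) + "+Axxxxxx..." (51) for a full 64 MiB block,
	// md5 (32) + "+3xxxxxx" (8) + "+Axxxxxx..." (51) for the 3 MiB tail block.
	bigloclen := int64(32 + 9 + 51 + 64)
	smallloclen := int64(32 + 8 + 51 + 64)

	// After flushing full blocks in every dir: 3 unflushed 1 MiB segments per dir
	// remain, plus 64 stored segments per dir pointing at the full block.
	afterFullBlocks := nDirs*3*(1<<20+64) + inodebytes + bigloclen*64*nDirs

	// After flushing everything: only inodes and stored-segment locators remain.
	afterAll := inodebytes + bigloclen*64*nDirs + smallloclen*3*nDirs

	fmt.Println(beforeFlush, afterFullBlocks, afterAll)
}

Running it prints the three totals that appear in the corresponding waitForFlush() calls in the hunk above.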
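TestRefreshSignatures relies on two behaviors described in its comments: a read on a locator whose signature has already expired refreshes signatures synchronously before returning, and a read made more than halfway through the signing TTL triggers an asynchronous refresh. The sketch below is a minimal illustration of that decision rule, assuming the signature was issued with the full TTL; the helper name is hypothetical and this is not collectionfs's actual implementation.

package main

import (
	"fmt"
	"time"
)

// refreshAction reports how a read should handle a block signature that
// expires at expiry, given the server's blob signing TTL.
func refreshAction(expiry time.Time, ttl time.Duration) (syncRefresh, asyncRefresh bool) {
	remaining := time.Until(expiry)
	switch {
	case remaining <= 0:
		return true, false // already expired: refresh before serving the read
	case remaining < ttl/2:
		return false, true // more than halfway to expiry: refresh in the background
	default:
		return false, false // still fresh: serve the read as-is
	}
}

func main() {
	ttl := 2 * time.Hour
	fmt.Println(refreshAction(time.Now().Add(-2*time.Second), ttl))  // true false
	fmt.Println(refreshAction(time.Now().Add(2*time.Minute), ttl))   // false true
	fmt.Println(refreshAction(time.Now().Add(90*time.Minute), ttl))  // false false
}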