X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/b714ab7401074991afe2fdc239c89107b3af6ca1..8991b43990aa7a77edd78f165114b93a6a207985:/sdk/go/arvados/fs_collection_test.go

diff --git a/sdk/go/arvados/fs_collection_test.go b/sdk/go/arvados/fs_collection_test.go
index fe3ad7a1e9..59a6a6ba82 100644
--- a/sdk/go/arvados/fs_collection_test.go
+++ b/sdk/go/arvados/fs_collection_test.go
@@ -7,7 +7,6 @@ package arvados
 import (
 	"bytes"
 	"crypto/md5"
-	"crypto/sha1"
 	"errors"
 	"fmt"
 	"io"
@@ -17,6 +16,7 @@ import (
 	"os"
 	"regexp"
 	"runtime"
+	"runtime/pprof"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -32,6 +32,9 @@ type keepClientStub struct {
 	blocks      map[string][]byte
 	refreshable map[string]bool
 	onPut       func(bufcopy []byte) // called from PutB, before acquiring lock
+	authToken   string               // client's auth token (used for signing locators)
+	sigkey      string               // blob signing key
+	sigttl      time.Duration        // blob signing ttl
 	sync.RWMutex
 }
 
@@ -48,7 +51,7 @@ func (kcs *keepClientStub) ReadAt(locator string, p []byte, off int) (int, error
 }
 
 func (kcs *keepClientStub) PutB(p []byte) (string, int, error) {
-	locator := fmt.Sprintf("%x+%d+A12345@abcde", md5.Sum(p), len(p))
+	locator := SignLocator(fmt.Sprintf("%x+%d", md5.Sum(p), len(p)), kcs.authToken, time.Now().Add(kcs.sigttl), kcs.sigttl, []byte(kcs.sigkey))
 	buf := make([]byte, len(p))
 	copy(buf, p)
 	if kcs.onPut != nil {
@@ -60,9 +63,12 @@ func (kcs *keepClientStub) PutB(p []byte) (string, int, error) {
 	return locator, 1, nil
 }
 
-var localOrRemoteSignature = regexp.MustCompile(`\+[AR][^+]*`)
+var reRemoteSignature = regexp.MustCompile(`\+[AR][^+]*`)
 
 func (kcs *keepClientStub) LocalLocator(locator string) (string, error) {
+	if strings.Contains(locator, "+A") {
+		return locator, nil
+	}
 	kcs.Lock()
 	defer kcs.Unlock()
 	if strings.Contains(locator, "+R") {
@@ -73,8 +79,9 @@
 			return "", fmt.Errorf("kcs.refreshable[%q]==false", locator)
 		}
 	}
-	fakeSig := fmt.Sprintf("+A%x@%x", sha1.Sum(nil), time.Now().Add(time.Hour*24*14).Unix())
-	return localOrRemoteSignature.ReplaceAllLiteralString(locator, fakeSig), nil
+	locator = reRemoteSignature.ReplaceAllLiteralString(locator, "")
+	locator = SignLocator(locator, kcs.authToken, time.Now().Add(kcs.sigttl), kcs.sigttl, []byte(kcs.sigkey))
+	return locator, nil
 }
 
 type CollectionFSSuite struct {
@@ -91,7 +98,11 @@ func (s *CollectionFSSuite) SetUpTest(c *check.C) {
 	s.kc = &keepClientStub{
 		blocks: map[string][]byte{
 			"3858f62230ac3c915f300c664312c63f": []byte("foobar"),
-		}}
+		},
+		sigkey:    fixtureBlobSigningKey,
+		sigttl:    fixtureBlobSigningTTL,
+		authToken: fixtureActiveToken,
+	}
 	s.fs, err = s.coll.FileSystem(s.client, s.kc)
 	c.Assert(err, check.IsNil)
 }
@@ -1040,11 +1051,11 @@ func (s *CollectionFSSuite) TestOpenFileFlags(c *check.C) {
 }
 
 func (s *CollectionFSSuite) TestFlushFullBlocksWritingLongFile(c *check.C) {
-	defer func(wab, mbs int) {
-		writeAheadBlocks = wab
+	defer func(cw, mbs int) {
+		concurrentWriters = cw
 		maxBlockSize = mbs
-	}(writeAheadBlocks, maxBlockSize)
-	writeAheadBlocks = 2
+	}(concurrentWriters, maxBlockSize)
+	concurrentWriters = 2
 	maxBlockSize = 1024
 
 	proceed := make(chan struct{})
@@ -1069,7 +1080,7 @@ func (s *CollectionFSSuite) TestFlushFullBlocksWritingLongFile(c *check.C) {
 		default:
 			time.Sleep(time.Millisecond)
 		}
-		c.Check(atomic.AddInt32(&concurrent, -1) < int32(writeAheadBlocks), check.Equals, true)
+		c.Check(atomic.AddInt32(&concurrent, -1) < int32(concurrentWriters), check.Equals, true)
 	}
 
 	fs, err := (&Collection{}).FileSystem(s.client, s.kc)
@@ -1139,7 +1150,7 @@ func (s *CollectionFSSuite) TestFlushAll(c *check.C) {
 		}
 
 		if i%8 == 0 {
-			fs.Flush(true)
+			fs.Flush("", true)
 		}
 
 		size := fs.memorySize()
@@ -1164,9 +1175,9 @@ func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
 		atomic.AddInt64(&flushed, int64(len(p)))
 	}
 
-	nDirs := 8
+	nDirs := int64(8)
 	megabyte := make([]byte, 1<<20)
-	for i := 0; i < nDirs; i++ {
+	for i := int64(0); i < nDirs; i++ {
 		dir := fmt.Sprintf("dir%d", i)
 		fs.Mkdir(dir, 0755)
 		for j := 0; j < 67; j++ {
@@ -1180,14 +1191,172 @@
 	c.Check(fs.memorySize(), check.Equals, int64(nDirs*67<<20))
 	c.Check(flushed, check.Equals, int64(0))
 
-	fs.Flush(false)
-	expectSize := int64(nDirs * 3 << 20)
+	waitForFlush := func(expectUnflushed, expectFlushed int64) {
+		for deadline := time.Now().Add(5 * time.Second); fs.memorySize() > expectUnflushed && time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) {
+		}
+		c.Check(fs.memorySize(), check.Equals, expectUnflushed)
+		c.Check(flushed, check.Equals, expectFlushed)
+	}
+
+	// Nothing flushed yet
+	waitForFlush((nDirs*67)<<20, 0)
+
+	// Flushing a non-empty dir "/" is non-recursive and there are
+	// no top-level files, so this has no effect
+	fs.Flush("/", false)
+	waitForFlush((nDirs*67)<<20, 0)
+
+	// Flush the full block in dir0
+	fs.Flush("dir0", false)
+	waitForFlush((nDirs*67-64)<<20, 64<<20)
+
+	err = fs.Flush("dir-does-not-exist", false)
+	c.Check(err, check.NotNil)
+
+	// Flush full blocks in all dirs
+	fs.Flush("", false)
+	waitForFlush(nDirs*3<<20, nDirs*64<<20)
+
+	// Flush non-full blocks, too
+	fs.Flush("", true)
+	waitForFlush(0, nDirs*67<<20)
+}
+
+// Even when writing lots of files/dirs from different goroutines, as
+// long as Flush(dir,false) is called after writing each file,
+// unflushed data should be limited to one full block per
+// concurrentWriter, plus one nearly-full block at the end of each
+// dir/stream.
+func (s *CollectionFSSuite) TestMaxUnflushed(c *check.C) {
+	nDirs := int64(8)
+	maxUnflushed := (int64(concurrentWriters) + nDirs) << 26
+
+	fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+	c.Assert(err, check.IsNil)
+
+	release := make(chan struct{})
+	timeout := make(chan struct{})
+	time.AfterFunc(10*time.Second, func() { close(timeout) })
+	var putCount, concurrency int64
+	var unflushed int64
+	s.kc.onPut = func(p []byte) {
+		defer atomic.AddInt64(&unflushed, -int64(len(p)))
+		cur := atomic.AddInt64(&concurrency, 1)
+		defer atomic.AddInt64(&concurrency, -1)
+		pc := atomic.AddInt64(&putCount, 1)
+		if pc < int64(concurrentWriters) {
+			// Block until we reach concurrentWriters, to
+			// make sure we're really accepting concurrent
+			// writes.
+			select {
+			case <-release:
+			case <-timeout:
+				c.Error("timeout")
+			}
+		} else if pc == int64(concurrentWriters) {
+			// Unblock the first N-1 PUT reqs.
+			close(release)
+		}
+		c.Assert(cur <= int64(concurrentWriters), check.Equals, true)
+		c.Assert(atomic.LoadInt64(&unflushed) <= maxUnflushed, check.Equals, true)
+	}
+
+	var owg sync.WaitGroup
+	megabyte := make([]byte, 1<<20)
+	for i := int64(0); i < nDirs; i++ {
+		dir := fmt.Sprintf("dir%d", i)
+		fs.Mkdir(dir, 0755)
+		owg.Add(1)
+		go func() {
+			defer owg.Done()
+			defer fs.Flush(dir, true)
+			var iwg sync.WaitGroup
+			defer iwg.Wait()
+			for j := 0; j < 67; j++ {
+				iwg.Add(1)
+				go func(j int) {
+					defer iwg.Done()
+					f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, j), os.O_WRONLY|os.O_CREATE, 0)
+					c.Assert(err, check.IsNil)
+					defer f.Close()
+					n, err := f.Write(megabyte)
+					c.Assert(err, check.IsNil)
+					atomic.AddInt64(&unflushed, int64(n))
+					fs.Flush(dir, false)
+				}(j)
+			}
+		}()
+	}
+	owg.Wait()
+	fs.Flush("", true)
+}
+
+func (s *CollectionFSSuite) TestFlushStress(c *check.C) {
+	done := false
+	defer func() { done = true }()
+	time.AfterFunc(10*time.Second, func() {
+		if !done {
+			pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
+			panic("timeout")
+		}
+	})
+
+	wrote := 0
+	s.kc.onPut = func(p []byte) {
+		s.kc.Lock()
+		s.kc.blocks = map[string][]byte{}
+		wrote++
+		defer c.Logf("wrote block %d, %d bytes", wrote, len(p))
+		s.kc.Unlock()
+		time.Sleep(20 * time.Millisecond)
+	}
+
+	fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+	c.Assert(err, check.IsNil)
 
-	// Wait for flush to finish
-	for deadline := time.Now().Add(5 * time.Second); fs.memorySize() > expectSize && time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) {
+	data := make([]byte, 1<<20)
+	for i := 0; i < 3; i++ {
+		dir := fmt.Sprintf("dir%d", i)
+		fs.Mkdir(dir, 0755)
+		for j := 0; j < 200; j++ {
+			data[0] = byte(j)
+			f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, j), os.O_WRONLY|os.O_CREATE, 0)
+			c.Assert(err, check.IsNil)
+			_, err = f.Write(data)
+			c.Assert(err, check.IsNil)
+			f.Close()
+			fs.Flush(dir, false)
+		}
+		_, err := fs.MarshalManifest(".")
+		c.Check(err, check.IsNil)
+	}
+}
+
+func (s *CollectionFSSuite) TestFlushShort(c *check.C) {
+	s.kc.onPut = func([]byte) {
+		s.kc.Lock()
+		s.kc.blocks = map[string][]byte{}
+		s.kc.Unlock()
+	}
+	fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+	c.Assert(err, check.IsNil)
+	for _, blocksize := range []int{8, 1000000} {
+		dir := fmt.Sprintf("dir%d", blocksize)
+		err = fs.Mkdir(dir, 0755)
+		c.Assert(err, check.IsNil)
+		data := make([]byte, blocksize)
+		for i := 0; i < 100; i++ {
+			f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, i), os.O_WRONLY|os.O_CREATE, 0)
+			c.Assert(err, check.IsNil)
+			_, err = f.Write(data)
+			c.Assert(err, check.IsNil)
+			f.Close()
+			fs.Flush(dir, false)
+		}
+		fs.Flush(dir, true)
+		_, err := fs.MarshalManifest(".")
+		c.Check(err, check.IsNil)
 	}
-	c.Check(fs.memorySize(), check.Equals, expectSize)
-	c.Check(flushed, check.Equals, int64(nDirs*64<<20))
 }
 
 func (s *CollectionFSSuite) TestBrokenManifests(c *check.C) {