16171: Configurable "email" and "email_verified" OIDC claim keys.
diff --git a/sdk/go/arvados/fs_collection_test.go b/sdk/go/arvados/fs_collection_test.go
index fe3ad7a1e9a44041258e1b099733e75323f4270a..f01369a885ece3b7c315c832b114caaf77715862 100644
--- a/sdk/go/arvados/fs_collection_test.go
+++ b/sdk/go/arvados/fs_collection_test.go
@@ -17,6 +17,7 @@ import (
        "os"
        "regexp"
        "runtime"
+       "runtime/pprof"
        "strings"
        "sync"
        "sync/atomic"
@@ -1040,11 +1041,11 @@ func (s *CollectionFSSuite) TestOpenFileFlags(c *check.C) {
 }
 
 func (s *CollectionFSSuite) TestFlushFullBlocksWritingLongFile(c *check.C) {
-       defer func(wab, mbs int) {
-               writeAheadBlocks = wab
+       defer func(cw, mbs int) {
+               concurrentWriters = cw
                maxBlockSize = mbs
-       }(writeAheadBlocks, maxBlockSize)
-       writeAheadBlocks = 2
+       }(concurrentWriters, maxBlockSize)
+       concurrentWriters = 2
        maxBlockSize = 1024
 
        proceed := make(chan struct{})
@@ -1069,7 +1070,7 @@ func (s *CollectionFSSuite) TestFlushFullBlocksWritingLongFile(c *check.C) {
                default:
                        time.Sleep(time.Millisecond)
                }
-               c.Check(atomic.AddInt32(&concurrent, -1) < int32(writeAheadBlocks), check.Equals, true)
+               c.Check(atomic.AddInt32(&concurrent, -1) < int32(concurrentWriters), check.Equals, true)
        }
 
        fs, err := (&Collection{}).FileSystem(s.client, s.kc)
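Note: writeAheadBlocks is replaced here by concurrentWriters, which (as exercised by the tests below) caps how many block writes the collection filesystem keeps in flight at once. A minimal sketch of that kind of cap, using a buffered-channel semaphore; the limit of 2 mirrors the test setting, the helper is illustrative rather than the SDK's implementation, and it assumes only the "sync" import already present in this file.

    // capped starts one goroutine per block write but allows at most
    // `limit` of them to run concurrently -- the behavior the
    // concurrentWriters tunable presumably controls. Illustrative only.
    func capped(limit int, blocks [][]byte, put func([]byte)) {
            sem := make(chan struct{}, limit) // semaphore with `limit` slots
            var wg sync.WaitGroup
            for _, b := range blocks {
                    b := b
                    wg.Add(1)
                    go func() {
                            defer wg.Done()
                            sem <- struct{}{}        // acquire a writer slot
                            defer func() { <-sem }() // release it when the write finishes
                            put(b)
                    }()
            }
            wg.Wait()
    }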
@@ -1139,7 +1140,7 @@ func (s *CollectionFSSuite) TestFlushAll(c *check.C) {
                }
 
                if i%8 == 0 {
-                       fs.Flush(true)
+                       fs.Flush("", true)
                }
 
                size := fs.memorySize()
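Flush's signature changes from Flush(full bool) to Flush(path string, shortBlocks bool), so this call now names a target path ("" meaning the whole collection). A hedged usage sketch inferred from the calls in these tests; nFiles and buf are illustrative, error handling is abbreviated, and the fragment assumes it runs inside a helper that returns error.

    // Write many files under "out", keeping buffered data bounded by
    // flushing full blocks after each file, then flush short blocks at the end.
    for j := 0; j < nFiles; j++ {
            f, err := fs.OpenFile(fmt.Sprintf("out/file%d", j), os.O_WRONLY|os.O_CREATE, 0)
            if err != nil {
                    return err
            }
            _, err = f.Write(buf)
            f.Close()
            if err != nil {
                    return err
            }
            if err := fs.Flush("out", false); err != nil { // full blocks only
                    return err
            }
    }
    return fs.Flush("out", true) // now include partially filled blocks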
@@ -1164,9 +1165,9 @@ func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
                atomic.AddInt64(&flushed, int64(len(p)))
        }
 
-       nDirs := 8
+       nDirs := int64(8)
        megabyte := make([]byte, 1<<20)
-       for i := 0; i < nDirs; i++ {
+       for i := int64(0); i < nDirs; i++ {
                dir := fmt.Sprintf("dir%d", i)
                fs.Mkdir(dir, 0755)
                for j := 0; j < 67; j++ {
@@ -1180,14 +1181,172 @@ func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
        c.Check(fs.memorySize(), check.Equals, int64(nDirs*67<<20))
        c.Check(flushed, check.Equals, int64(0))
 
-       fs.Flush(false)
-       expectSize := int64(nDirs * 3 << 20)
+       waitForFlush := func(expectUnflushed, expectFlushed int64) {
+               for deadline := time.Now().Add(5 * time.Second); fs.memorySize() > expectUnflushed && time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) {
+               }
+               c.Check(fs.memorySize(), check.Equals, expectUnflushed)
+               c.Check(flushed, check.Equals, expectFlushed)
+       }
+
+       // Nothing flushed yet
+       waitForFlush((nDirs*67)<<20, 0)
+
+       // Flushing a non-empty dir "/" is non-recursive and there are
+       // no top-level files, so this has no effect
+       fs.Flush("/", false)
+       waitForFlush((nDirs*67)<<20, 0)
+
+       // Flush the full block in dir0
+       fs.Flush("dir0", false)
+       waitForFlush((nDirs*67-64)<<20, 64<<20)
+
+       err = fs.Flush("dir-does-not-exist", false)
+       c.Check(err, check.NotNil)
+
+       // Flush full blocks in all dirs
+       fs.Flush("", false)
+       waitForFlush(nDirs*3<<20, nDirs*64<<20)
+
+       // Flush non-full blocks, too
+       fs.Flush("", true)
+       waitForFlush(0, nDirs*67<<20)
+}
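Because flushing now happens asynchronously in background writers, the test polls fs.memorySize() until it reaches the expected value rather than checking immediately. The arithmetic: each of the 8 dirs buffers 67 MiB; a full-block flush writes one 64 MiB block per dir and leaves 3 MiB of short blocks behind, and Flush("", true) then drains the rest. A standalone sketch of the same poll-until-deadline pattern used by waitForFlush; the helper name is illustrative and relies only on the "time" import already in this file.

    // eventually polls cond every interval until it returns true or the
    // deadline passes; the final call reports the settled state.
    func eventually(timeout, interval time.Duration, cond func() bool) bool {
            for deadline := time.Now().Add(timeout); time.Now().Before(deadline); time.Sleep(interval) {
                    if cond() {
                            return true
                    }
            }
            return cond()
    }

    // e.g. ok := eventually(5*time.Second, 10*time.Millisecond,
    //         func() bool { return fs.memorySize() <= expectUnflushed })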
+
+// Even when writing lots of files/dirs from different goroutines, as
+// long as Flush(dir,false) is called after writing each file,
+// unflushed data should be limited to one full block per
+// concurrentWriter, plus one nearly-full block at the end of each
+// dir/stream.
+func (s *CollectionFSSuite) TestMaxUnflushed(c *check.C) {
+       nDirs := int64(8)
+       maxUnflushed := (int64(concurrentWriters) + nDirs) << 26
+
+       fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+
+       release := make(chan struct{})
+       timeout := make(chan struct{})
+       time.AfterFunc(10*time.Second, func() { close(timeout) })
+       var putCount, concurrency int64
+       var unflushed int64
+       s.kc.onPut = func(p []byte) {
+               defer atomic.AddInt64(&unflushed, -int64(len(p)))
+               cur := atomic.AddInt64(&concurrency, 1)
+               defer atomic.AddInt64(&concurrency, -1)
+               pc := atomic.AddInt64(&putCount, 1)
+               if pc < int64(concurrentWriters) {
+                       // Block until we reach concurrentWriters, to
+                       // make sure we're really accepting concurrent
+                       // writes.
+                       select {
+                       case <-release:
+                       case <-timeout:
+                               c.Error("timeout")
+                       }
+               } else if pc == int64(concurrentWriters) {
+                       // Unblock the first N-1 PUT reqs.
+                       close(release)
+               }
+               c.Assert(cur <= int64(concurrentWriters), check.Equals, true)
+               c.Assert(atomic.LoadInt64(&unflushed) <= maxUnflushed, check.Equals, true)
+       }
+
+       var owg sync.WaitGroup
+       megabyte := make([]byte, 1<<20)
+       for i := int64(0); i < nDirs; i++ {
+               dir := fmt.Sprintf("dir%d", i)
+               fs.Mkdir(dir, 0755)
+               owg.Add(1)
+               go func() {
+                       defer owg.Done()
+                       defer fs.Flush(dir, true)
+                       var iwg sync.WaitGroup
+                       defer iwg.Wait()
+                       for j := 0; j < 67; j++ {
+                               iwg.Add(1)
+                               go func(j int) {
+                                       defer iwg.Done()
+                                       f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, j), os.O_WRONLY|os.O_CREATE, 0)
+                                       c.Assert(err, check.IsNil)
+                                       defer f.Close()
+                                       n, err := f.Write(megabyte)
+                                       c.Assert(err, check.IsNil)
+                                       atomic.AddInt64(&unflushed, int64(n))
+                                       fs.Flush(dir, false)
+                               }(j)
+                       }
+               }()
+       }
+       owg.Wait()
+       fs.Flush("", true)
+}
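The bound asserted above: with Flush(dir, false) issued after every 1 MiB write, unflushed data should never exceed one full 64 MiB block per concurrent writer plus one nearly-full block per directory, i.e. (concurrentWriters + nDirs) << 26 bytes. The onPut hook also proves the writes really overlap by parking the first N-1 uploads until the Nth arrives. A standalone sketch of that rendezvous; names are illustrative and it assumes this file's sync/atomic import.

    // rendezvous returns a hook that blocks its first n-1 callers until the
    // n-th caller arrives, demonstrating that n calls really run concurrently.
    func rendezvous(n int64) func() {
            release := make(chan struct{})
            var arrived int64
            return func() {
                    switch k := atomic.AddInt64(&arrived, 1); {
                    case k < n:
                            <-release // wait until the n-th concurrent caller shows up
                    case k == n:
                            close(release) // unblock the earlier callers
                    }
            }
    }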
+
+func (s *CollectionFSSuite) TestFlushStress(c *check.C) {
+       done := false
+       defer func() { done = true }()
+       time.AfterFunc(10*time.Second, func() {
+               if !done {
+                       pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
+                       panic("timeout")
+               }
+       })
+
+       wrote := 0
+       s.kc.onPut = func(p []byte) {
+               s.kc.Lock()
+               s.kc.blocks = map[string][]byte{}
+               wrote++
+               defer c.Logf("wrote block %d, %d bytes", wrote, len(p))
+               s.kc.Unlock()
+               time.Sleep(20 * time.Millisecond)
+       }
+
+       fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+
+       data := make([]byte, 1<<20)
+       for i := 0; i < 3; i++ {
+               dir := fmt.Sprintf("dir%d", i)
+               fs.Mkdir(dir, 0755)
+               for j := 0; j < 200; j++ {
+                       data[0] = byte(j)
+                       f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, j), os.O_WRONLY|os.O_CREATE, 0)
+                       c.Assert(err, check.IsNil)
+                       _, err = f.Write(data)
+                       c.Assert(err, check.IsNil)
+                       f.Close()
+                       fs.Flush(dir, false)
+               }
+               _, err := fs.MarshalManifest(".")
+               c.Check(err, check.IsNil)
+       }
+}
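This is what the new runtime/pprof import in the first hunk is for: if the stress test wedges, the goroutine profile is dumped to stderr before panicking, so a stuck flush worker can be identified from the test log. A slightly tidier variant of the same guard, stopping the timer instead of checking a done flag (an assumption, not part of this patch):

    timer := time.AfterFunc(10*time.Second, func() {
            // debug=1 prints every goroutine's stack, grouped by identical stacks.
            pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
            panic("test timed out")
    })
    defer timer.Stop()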
 
-       // Wait for flush to finish
-       for deadline := time.Now().Add(5 * time.Second); fs.memorySize() > expectSize && time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) {
+func (s *CollectionFSSuite) TestFlushShort(c *check.C) {
+       s.kc.onPut = func([]byte) {
+               s.kc.Lock()
+               s.kc.blocks = map[string][]byte{}
+               s.kc.Unlock()
+       }
+       fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+       c.Assert(err, check.IsNil)
+       for _, blocksize := range []int{8, 1000000} {
+               dir := fmt.Sprintf("dir%d", blocksize)
+               err = fs.Mkdir(dir, 0755)
+               c.Assert(err, check.IsNil)
+               data := make([]byte, blocksize)
+               for i := 0; i < 100; i++ {
+                       f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, i), os.O_WRONLY|os.O_CREATE, 0)
+                       c.Assert(err, check.IsNil)
+                       _, err = f.Write(data)
+                       c.Assert(err, check.IsNil)
+                       f.Close()
+                       fs.Flush(dir, false)
+               }
+               fs.Flush(dir, true)
+               _, err := fs.MarshalManifest(".")
+               c.Check(err, check.IsNil)
        }
-       c.Check(fs.memorySize(), check.Equals, expectSize)
-       c.Check(flushed, check.Equals, int64(nDirs*64<<20))
 }
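A note on the stub keep client used throughout these tests: each onPut hook replaces s.kc.blocks with a fresh map, so the fake server accepts and immediately discards block payloads; the tests only exercise flush ordering, memory accounting, and the manifest produced by MarshalManifest("."). A minimal discarding counter in the same spirit; the type and field names are illustrative, not part of the SDK.

    // discardingKeep counts what it receives but keeps no payload, so a
    // stress test can push gigabytes through it without growing memory.
    type discardingKeep struct {
            mu     sync.Mutex
            puts   int
            nbytes int64
    }

    func (k *discardingKeep) onPut(p []byte) {
            k.mu.Lock()
            defer k.mu.Unlock()
            k.puts++
            k.nbytes += int64(len(p))
    }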
 
 func (s *CollectionFSSuite) TestBrokenManifests(c *check.C) {