import (
"bytes"
"crypto/md5"
+ "crypto/sha1"
"errors"
"fmt"
"io"
"os"
"regexp"
"runtime"
+ "runtime/pprof"
+ "strings"
"sync"
+ "sync/atomic"
"testing"
"time"
- "git.curoverse.com/arvados.git/sdk/go/arvadostest"
check "gopkg.in/check.v1"
)
var _ = check.Suite(&CollectionFSSuite{})
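+// keepClientStub is an in-memory keepClient used by these tests: it
+// stores blocks in a map, and tests can hook into PutB via onPut.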
type keepClientStub struct {
- blocks map[string][]byte
+ blocks map[string][]byte
+ refreshable map[string]bool
+ onPut func(bufcopy []byte) // called from PutB, before acquiring lock
sync.RWMutex
}
locator := fmt.Sprintf("%x+%d+A12345@abcde", md5.Sum(p), len(p))
buf := make([]byte, len(p))
copy(buf, p)
+ if kcs.onPut != nil {
+ kcs.onPut(buf)
+ }
kcs.Lock()
defer kcs.Unlock()
kcs.blocks[locator[:32]] = buf
return locator, 1, nil
}
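+// localOrRemoteSignature matches the +A (local signature) or +R
+// (remote) hint in a block locator.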
+var localOrRemoteSignature = regexp.MustCompile(`\+[AR][^+]*`)
+
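+// LocalLocator stands in for keepclient's LocalLocator: a +R locator
+// is accepted only if the block is stored locally or marked
+// refreshable, and the returned locator gets a fresh fake +A
+// signature.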
+func (kcs *keepClientStub) LocalLocator(locator string) (string, error) {
+ kcs.Lock()
+ defer kcs.Unlock()
+ if strings.Contains(locator, "+R") {
+ if len(locator) < 32 {
+ return "", fmt.Errorf("bad locator: %q", locator)
+ }
+ if _, ok := kcs.blocks[locator[:32]]; !ok && !kcs.refreshable[locator[:32]] {
+ return "", fmt.Errorf("kcs.refreshable[%q]==false", locator)
+ }
+ }
+ fakeSig := fmt.Sprintf("+A%x@%x", sha1.Sum(nil), time.Now().Add(time.Hour*24*14).Unix())
+ return localOrRemoteSignature.ReplaceAllLiteralString(locator, fakeSig), nil
+}
+
type CollectionFSSuite struct {
client *Client
coll Collection
fs CollectionFileSystem
- kc keepClient
+ kc *keepClientStub
}
func (s *CollectionFSSuite) SetUpTest(c *check.C) {
s.client = NewClientFromEnv()
- err := s.client.RequestAndDecode(&s.coll, "GET", "arvados/v1/collections/"+arvadostest.FooAndBarFilesInDirUUID, nil, nil)
+ err := s.client.RequestAndDecode(&s.coll, "GET", "arvados/v1/collections/"+fixtureFooAndBarFilesInDirUUID, nil, nil)
c.Assert(err, check.IsNil)
s.kc = &keepClientStub{
blocks: map[string][]byte{
checkSize(11)
}
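+// TestMarshalCopiesRemoteBlocks checks that MarshalManifest replaces
+// remote (+R) block hints with local +A signatures, and fails when a
+// remote block cannot be refreshed.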
+func (s *CollectionFSSuite) TestMarshalCopiesRemoteBlocks(c *check.C) {
+ foo := "foo"
+ bar := "bar"
+ hash := map[string]string{
+ foo: fmt.Sprintf("%x", md5.Sum([]byte(foo))),
+ bar: fmt.Sprintf("%x", md5.Sum([]byte(bar))),
+ }
+
+ fs, err := (&Collection{
+ ManifestText: ". " + hash[foo] + "+3+Rzaaaa-foo@bab " + hash[bar] + "+3+A12345@ffffff 0:2:fo.txt 2:4:obar.txt\n",
+ }).FileSystem(s.client, s.kc)
+ c.Assert(err, check.IsNil)
+ manifest, err := fs.MarshalManifest(".")
+ c.Check(manifest, check.Equals, "")
+ c.Check(err, check.NotNil)
+
+ s.kc.refreshable = map[string]bool{hash[bar]: true}
+
+ for _, sigIn := range []string{"Rzaaaa-foo@bab", "A12345@abcde"} {
+ fs, err = (&Collection{
+ ManifestText: ". " + hash[foo] + "+3+A12345@fffff " + hash[bar] + "+3+" + sigIn + " 0:2:fo.txt 2:4:obar.txt\n",
+ }).FileSystem(s.client, s.kc)
+ c.Assert(err, check.IsNil)
+ manifest, err := fs.MarshalManifest(".")
+ c.Check(err, check.IsNil)
+ // Both blocks should now have +A signatures.
+ c.Check(manifest, check.Matches, `.*\+A.* .*\+A.*\n`)
+ c.Check(manifest, check.Not(check.Matches), `.*\+R.*\n`)
+ }
+}
+
func (s *CollectionFSSuite) TestMarshalSmallBlocks(c *check.C) {
maxBlockSize = 8
defer func() { maxBlockSize = 2 << 26 }()
}
maxBlockSize = 8
- defer func() { maxBlockSize = 2 << 26 }()
+ defer func() { maxBlockSize = 1 << 26 }()
var wg sync.WaitGroup
for n := 0; n < 128; n++ {
const ngoroutines = 256
var wg sync.WaitGroup
- for n := 0; n < nfiles; n++ {
+ for n := 0; n < ngoroutines; n++ {
wg.Add(1)
go func(n int) {
defer wg.Done()
f, err := s.fs.OpenFile(fmt.Sprintf("random-%d", n), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0)
c.Assert(err, check.IsNil)
defer f.Close()
- for i := 0; i < ngoroutines; i++ {
+ for i := 0; i < nfiles; i++ {
trunc := rand.Intn(65)
woff := rand.Intn(trunc + 1)
wbytes = wbytes[:rand.Intn(64-woff+1)]
c.Check(string(buf), check.Equals, string(expect))
c.Check(err, check.IsNil)
}
- s.checkMemSize(c, f)
}(n)
}
wg.Wait()
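+ // Wait for each file's pending background flush (waitPrune)
+ // before checking its in-memory segment usage.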
+ for n := 0; n < ngoroutines; n++ {
+ f, err := s.fs.OpenFile(fmt.Sprintf("random-%d", n), os.O_RDONLY, 0)
+ c.Assert(err, check.IsNil)
+ f.(*filehandle).inode.(*filenode).waitPrune()
+ s.checkMemSize(c, f)
+ defer f.Close()
+ }
+
root, err := s.fs.Open("/")
c.Assert(err, check.IsNil)
defer root.Close()
c.Check(data, check.DeepEquals, []byte{1, 2, 3, 4, 5})
}
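+// TestRenameDirectory exercises directory rename edge cases:
+// renaming onto an existing directory, trailing slashes, and "." as
+// a rename target.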
+func (s *CollectionFSSuite) TestRenameDirectory(c *check.C) {
+ fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+ c.Assert(err, check.IsNil)
+ err = fs.Mkdir("foo", 0755)
+ c.Assert(err, check.IsNil)
+ err = fs.Mkdir("bar", 0755)
+ c.Assert(err, check.IsNil)
+ err = fs.Rename("bar", "baz")
+ c.Check(err, check.IsNil)
+ err = fs.Rename("foo", "baz")
+ c.Check(err, check.NotNil)
+ err = fs.Rename("foo", "baz/")
+ c.Check(err, check.IsNil)
+ err = fs.Rename("baz/foo", ".")
+ c.Check(err, check.Equals, ErrInvalidArgument)
+ err = fs.Rename("baz/foo/", ".")
+ c.Check(err, check.Equals, ErrInvalidArgument)
+}
+
func (s *CollectionFSSuite) TestRename(c *check.C) {
fs, err := (&Collection{}).FileSystem(s.client, s.kc)
c.Assert(err, check.IsNil)
}
}
-func (s *CollectionFSSuite) TestPersistEmptyFiles(c *check.C) {
+func (s *CollectionFSSuite) TestPersistEmptyFilesAndDirs(c *check.C) {
var err error
s.fs, err = (&Collection{}).FileSystem(s.client, s.kc)
c.Assert(err, check.IsNil)
- for _, name := range []string{"dir", "dir/zerodir", "zero", "zero/zero"} {
+ for _, name := range []string{"dir", "dir/zerodir", "empty", "not empty", "not empty/empty", "zero", "zero/zero"} {
err = s.fs.Mkdir(name, 0755)
c.Assert(err, check.IsNil)
}
c.Check(err, check.IsNil)
c.Check(buf, check.DeepEquals, data)
}
+
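+ // Empty directories should also survive the marshal/reload
+ // round trip.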
+ expectDir := map[string]int{
+ "empty": 0,
+ "not empty": 1,
+ "not empty/empty": 0,
+ }
+ for name, expectLen := range expectDir {
+ _, err := persisted.Open(name + "/bogus")
+ c.Check(err, check.NotNil)
+
+ d, err := persisted.Open(name)
+ c.Assert(err, check.IsNil)
+ defer d.Close()
+ fi, err := d.Readdir(-1)
+ c.Check(err, check.IsNil)
+ c.Check(fi, check.HasLen, expectLen)
+ }
}
func (s *CollectionFSSuite) TestOpenFileFlags(c *check.C) {
c.Check(err, check.ErrorMatches, `invalid flag.*`)
}
-func (s *CollectionFSSuite) TestFlushFullBlocks(c *check.C) {
+func (s *CollectionFSSuite) TestFlushFullBlocksWritingLongFile(c *check.C) {
+ defer func(cw, mbs int) {
+ concurrentWriters = cw
+ maxBlockSize = mbs
+ }(concurrentWriters, maxBlockSize)
+ concurrentWriters = 2
maxBlockSize = 1024
- defer func() { maxBlockSize = 2 << 26 }()
+
+ proceed := make(chan struct{})
+ var started, concurrent int32
+ blk2done := false
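+ // Stall the first PutB until blocks 2 and 3 have been started
+ // (and block 2 finished), and check that no more than
+ // concurrentWriters puts are ever in flight at once.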
+ s.kc.onPut = func([]byte) {
+ atomic.AddInt32(&concurrent, 1)
+ switch atomic.AddInt32(&started, 1) {
+ case 1:
+ // Wait until block 2 starts and finishes, and block 3 starts
+ select {
+ case <-proceed:
+ c.Check(blk2done, check.Equals, true)
+ case <-time.After(time.Second):
+ c.Error("timed out")
+ }
+ case 2:
+ time.Sleep(time.Millisecond)
+ blk2done = true
+ case 3:
+ close(proceed)
+ default:
+ time.Sleep(time.Millisecond)
+ }
+ c.Check(atomic.AddInt32(&concurrent, -1) < int32(concurrentWriters), check.Equals, true)
+ }
fs, err := (&Collection{}).FileSystem(s.client, s.kc)
c.Assert(err, check.IsNil)
}
return
}
+ f.(*filehandle).inode.(*filenode).waitPrune()
c.Check(currentMemExtents(), check.HasLen, 1)
m, err := fs.MarshalManifest(".")
c.Check(currentMemExtents(), check.HasLen, 0)
}
+// Ensure blocks get flushed to disk if a lot of data is written to
+// small files/directories without calling sync().
+//
+// Write a 512KiB buffer into files in each of 256 top-level dirs,
+// calling Flush() every 8 dirs, and check that memory usage stays
+// under the 16MiB (1<<24 byte) limit asserted in the loop.
+func (s *CollectionFSSuite) TestFlushAll(c *check.C) {
+ fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+ c.Assert(err, check.IsNil)
+
+ s.kc.onPut = func([]byte) {
+ // discard flushed data -- otherwise the stub will use
+ // unlimited memory
+ time.Sleep(time.Millisecond)
+ s.kc.Lock()
+ defer s.kc.Unlock()
+ s.kc.blocks = map[string][]byte{}
+ }
+ for i := 0; i < 256; i++ {
+ buf := bytes.NewBuffer(make([]byte, 524288))
+ fmt.Fprintf(buf, "test file in dir%d", i)
+
+ dir := fmt.Sprintf("dir%d", i)
+ fs.Mkdir(dir, 0755)
+ for j := 0; j < 2; j++ {
+ f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, j), os.O_WRONLY|os.O_CREATE, 0)
+ c.Assert(err, check.IsNil)
+ defer f.Close()
+ _, err = io.Copy(f, buf)
+ c.Assert(err, check.IsNil)
+ }
+
+ if i%8 == 0 {
+ fs.Flush("", true)
+ }
+
+ size := fs.memorySize()
+ if !c.Check(size <= 1<<24, check.Equals, true) {
+ c.Logf("at dir%d fs.memorySize()=%d", i, size)
+ return
+ }
+ }
+}
+
+// Ensure short blocks at the end of a stream don't get flushed by
+// Flush(false).
+//
+// Write 67x 1MiB files to each of 8 dirs, and check that 8 full 64MiB
+// blocks have been flushed while 8x 3MiB is still buffered in memory.
+func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
+ fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+ c.Assert(err, check.IsNil)
+
+ var flushed int64
+ s.kc.onPut = func(p []byte) {
+ atomic.AddInt64(&flushed, int64(len(p)))
+ }
+
+ nDirs := int64(8)
+ megabyte := make([]byte, 1<<20)
+ for i := int64(0); i < nDirs; i++ {
+ dir := fmt.Sprintf("dir%d", i)
+ fs.Mkdir(dir, 0755)
+ for j := 0; j < 67; j++ {
+ f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, j), os.O_WRONLY|os.O_CREATE, 0)
+ c.Assert(err, check.IsNil)
+ defer f.Close()
+ _, err = f.Write(megabyte)
+ c.Assert(err, check.IsNil)
+ }
+ }
+ c.Check(fs.memorySize(), check.Equals, int64(nDirs*67<<20))
+ c.Check(flushed, check.Equals, int64(0))
+
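+ // waitForFlush waits (up to 5s) for memorySize to drop to
+ // expectUnflushed, then checks the flushed byte count.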
+ waitForFlush := func(expectUnflushed, expectFlushed int64) {
+ for deadline := time.Now().Add(5 * time.Second); fs.memorySize() > expectUnflushed && time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) {
+ }
+ c.Check(fs.memorySize(), check.Equals, expectUnflushed)
+ c.Check(atomic.LoadInt64(&flushed), check.Equals, expectFlushed)
+ }
+
+ // Nothing flushed yet
+ waitForFlush((nDirs*67)<<20, 0)
+
+ // Flushing a non-empty dir "/" is non-recursive and there are
+ // no top-level files, so this has no effect
+ fs.Flush("/", false)
+ waitForFlush((nDirs*67)<<20, 0)
+
+ // Flush the full block in dir0
+ fs.Flush("dir0", false)
+ waitForFlush((nDirs*67-64)<<20, 64<<20)
+
+ err = fs.Flush("dir-does-not-exist", false)
+ c.Check(err, check.NotNil)
+
+ // Flush full blocks in all dirs
+ fs.Flush("", false)
+ waitForFlush(nDirs*3<<20, nDirs*64<<20)
+
+ // Flush non-full blocks, too
+ fs.Flush("", true)
+ waitForFlush(0, nDirs*67<<20)
+}
+
+// Even when writing lots of files/dirs from different goroutines, as
+// long as Flush(dir,false) is called after writing each file,
+// unflushed data should be limited to one full block per
+// concurrentWriter, plus one nearly-full block at the end of each
+// dir/stream.
+func (s *CollectionFSSuite) TestMaxUnflushed(c *check.C) {
+ nDirs := int64(8)
+ maxUnflushed := (int64(concurrentWriters) + nDirs) << 26
+
+ fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+ c.Assert(err, check.IsNil)
+
+ release := make(chan struct{})
+ timeout := make(chan struct{})
+ time.AfterFunc(10*time.Second, func() { close(timeout) })
+ var putCount, concurrency int64
+ var unflushed int64
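+ // Track unflushed bytes and concurrent PutB calls; fail if
+ // either exceeds its expected bound.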
+ s.kc.onPut = func(p []byte) {
+ defer atomic.AddInt64(&unflushed, -int64(len(p)))
+ cur := atomic.AddInt64(&concurrency, 1)
+ defer atomic.AddInt64(&concurrency, -1)
+ pc := atomic.AddInt64(&putCount, 1)
+ if pc < int64(concurrentWriters) {
+ // Block until we reach concurrentWriters, to
+ // make sure we're really accepting concurrent
+ // writes.
+ select {
+ case <-release:
+ case <-timeout:
+ c.Error("timeout")
+ }
+ } else if pc == int64(concurrentWriters) {
+ // Unblock the first N-1 PUT reqs.
+ close(release)
+ }
+ c.Assert(cur <= int64(concurrentWriters), check.Equals, true)
+ c.Assert(atomic.LoadInt64(&unflushed) <= maxUnflushed, check.Equals, true)
+ }
+
+ var owg sync.WaitGroup
+ megabyte := make([]byte, 1<<20)
+ for i := int64(0); i < nDirs; i++ {
+ dir := fmt.Sprintf("dir%d", i)
+ fs.Mkdir(dir, 0755)
+ owg.Add(1)
+ go func() {
+ defer owg.Done()
+ defer fs.Flush(dir, true)
+ var iwg sync.WaitGroup
+ defer iwg.Wait()
+ for j := 0; j < 67; j++ {
+ iwg.Add(1)
+ go func(j int) {
+ defer iwg.Done()
+ f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, j), os.O_WRONLY|os.O_CREATE, 0)
+ c.Assert(err, check.IsNil)
+ defer f.Close()
+ n, err := f.Write(megabyte)
+ c.Assert(err, check.IsNil)
+ atomic.AddInt64(&unflushed, int64(n))
+ fs.Flush(dir, false)
+ }(j)
+ }
+ }()
+ }
+ owg.Wait()
+ fs.Flush("", true)
+}
+
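+// TestFlushStress writes many 1MiB files across several dirs,
+// calling Flush(dir, false) after each write while the stub's slow
+// PutB discards data, and checks MarshalManifest still succeeds.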
+func (s *CollectionFSSuite) TestFlushStress(c *check.C) {
+ done := false
+ defer func() { done = true }()
+ time.AfterFunc(10*time.Second, func() {
+ if !done {
+ pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
+ panic("timeout")
+ }
+ })
+
+ wrote := 0
+ s.kc.onPut = func(p []byte) {
+ s.kc.Lock()
+ s.kc.blocks = map[string][]byte{}
+ wrote++
+ defer c.Logf("wrote block %d, %d bytes", wrote, len(p))
+ s.kc.Unlock()
+ time.Sleep(20 * time.Millisecond)
+ }
+
+ fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+ c.Assert(err, check.IsNil)
+
+ data := make([]byte, 1<<20)
+ for i := 0; i < 3; i++ {
+ dir := fmt.Sprintf("dir%d", i)
+ fs.Mkdir(dir, 0755)
+ for j := 0; j < 200; j++ {
+ data[0] = byte(j)
+ f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, j), os.O_WRONLY|os.O_CREATE, 0)
+ c.Assert(err, check.IsNil)
+ _, err = f.Write(data)
+ c.Assert(err, check.IsNil)
+ f.Close()
+ fs.Flush(dir, false)
+ }
+ _, err := fs.MarshalManifest(".")
+ c.Check(err, check.IsNil)
+ }
+}
+
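+// TestFlushShort writes many very short (and ~1MB) files, calling
+// Flush(dir, false) after each write and Flush(dir, true) at the
+// end, and checks the result can still be marshaled.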
+func (s *CollectionFSSuite) TestFlushShort(c *check.C) {
+ s.kc.onPut = func([]byte) {
+ s.kc.Lock()
+ s.kc.blocks = map[string][]byte{}
+ s.kc.Unlock()
+ }
+ fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+ c.Assert(err, check.IsNil)
+ for _, blocksize := range []int{8, 1000000} {
+ dir := fmt.Sprintf("dir%d", blocksize)
+ err = fs.Mkdir(dir, 0755)
+ c.Assert(err, check.IsNil)
+ data := make([]byte, blocksize)
+ for i := 0; i < 100; i++ {
+ f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, i), os.O_WRONLY|os.O_CREATE, 0)
+ c.Assert(err, check.IsNil)
+ _, err = f.Write(data)
+ c.Assert(err, check.IsNil)
+ f.Close()
+ fs.Flush(dir, false)
+ }
+ fs.Flush(dir, true)
+ _, err := fs.MarshalManifest(".")
+ c.Check(err, check.IsNil)
+ }
+}
+
func (s *CollectionFSSuite) TestBrokenManifests(c *check.C) {
for _, txt := range []string{
"\n",
". d41d8cd98f00b204e9800998ecf8427e+0 foo:0:foo\n",
". d41d8cd98f00b204e9800998ecf8427e+0 0:foo:foo\n",
". d41d8cd98f00b204e9800998ecf8427e+1 0:1:foo 1:1:bar\n",
+ ". d41d8cd98f00b204e9800998ecf8427e+1 0:1:\\056\n",
+ ". d41d8cd98f00b204e9800998ecf8427e+1 0:1:\\056\\057\\056\n",
+ ". d41d8cd98f00b204e9800998ecf8427e+1 0:1:.\n",
+ ". d41d8cd98f00b204e9800998ecf8427e+1 0:1:..\n",
+ ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:..\n",
+ ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/..\n",
". d41d8cd98f00b204e9800998ecf8427e+1 0:0:foo\n./foo d41d8cd98f00b204e9800998ecf8427e+1 0:0:bar\n",
"./foo d41d8cd98f00b204e9800998ecf8427e+1 0:0:bar\n. d41d8cd98f00b204e9800998ecf8427e+1 0:0:foo\n",
} {
for _, txt := range []string{
"",
". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
- ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo 0:0:foo 0:0:bar\n",
+ ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:...\n",
+ ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:. 0:0:. 0:0:\\056 0:0:\\056\n",
+ ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/. 0:0:. 0:0:foo\\057bar\\057\\056\n",
". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo 0:0:foo 0:0:bar\n",
". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/bar\n./foo d41d8cd98f00b204e9800998ecf8427e+0 0:0:bar\n",
} {