X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/0bd1c28bed9a0756c61037947d5a9dccd5066f00..23cc1183e500eb6fa22b611c1c6fc60de1aff9e7:/services/keepstore/volume_unix_test.go

diff --git a/services/keepstore/volume_unix_test.go b/services/keepstore/volume_unix_test.go
index 278e656066..08ca31cc5b 100644
--- a/services/keepstore/volume_unix_test.go
+++ b/services/keepstore/volume_unix_test.go
@@ -2,32 +2,44 @@ package main
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
+	"regexp"
+	"sort"
+	"strings"
+	"sync"
+	"syscall"
 	"testing"
 	"time"
 )
 
-func TempUnixVolume(t *testing.T, serialize bool) UnixVolume {
+func TempUnixVolume(t *testing.T, serialize bool, readonly bool) *UnixVolume {
 	d, err := ioutil.TempDir("", "volume_test")
 	if err != nil {
 		t.Fatal(err)
 	}
-	return MakeUnixVolume(d, serialize)
+	var locker sync.Locker
+	if serialize {
+		locker = &sync.Mutex{}
+	}
+	return &UnixVolume{
+		root:     d,
+		locker:   locker,
+		readonly: readonly,
+	}
 }
 
-func _teardown(v UnixVolume) {
-	if v.queue != nil {
-		close(v.queue)
-	}
+func _teardown(v *UnixVolume) {
 	os.RemoveAll(v.root)
 }
 
-// store writes a Keep block directly into a UnixVolume, for testing
-// UnixVolume methods.
-//
-func _store(t *testing.T, vol UnixVolume, filename string, block []byte) {
+// _store writes a Keep block directly into a UnixVolume, bypassing
+// the overhead and safeguards of Put(). Useful for storing bogus data
+// and isolating unit tests from Put() behavior.
+func _store(t *testing.T, vol *UnixVolume, filename string, block []byte) {
 	blockdir := fmt.Sprintf("%s/%s", vol.root, filename[:3])
 	if err := os.MkdirAll(blockdir, 0755); err != nil {
 		t.Fatal(err)
@@ -43,7 +55,7 @@ func _store(t *testing.T, vol UnixVolume, filename string, block []byte) {
 }
 
 func TestGet(t *testing.T) {
-	v := TempUnixVolume(t, false)
+	v := TempUnixVolume(t, false, false)
 	defer _teardown(v)
 	_store(t, v, TEST_HASH, TEST_BLOCK)
 
@@ -57,7 +69,7 @@ func TestGet(t *testing.T) {
 }
 
 func TestGetNotFound(t *testing.T) {
-	v := TempUnixVolume(t, false)
+	v := TempUnixVolume(t, false, false)
 	defer _teardown(v)
 	_store(t, v, TEST_HASH, TEST_BLOCK)
 
@@ -72,8 +84,44 @@ func TestGetNotFound(t *testing.T) {
 	}
 }
 
+func TestIndexTo(t *testing.T) {
+	v := TempUnixVolume(t, false, false)
+	defer _teardown(v)
+
+	_store(t, v, TEST_HASH, TEST_BLOCK)
+	_store(t, v, TEST_HASH_2, TEST_BLOCK_2)
+	_store(t, v, TEST_HASH_3, TEST_BLOCK_3)
+
+	buf := new(bytes.Buffer)
+	v.IndexTo("", buf)
+	index_rows := strings.Split(string(buf.Bytes()), "\n")
+	sort.Strings(index_rows)
+	sorted_index := strings.Join(index_rows, "\n")
+	m, err := regexp.MatchString(
+		`^\n`+TEST_HASH+`\+\d+ \d+\n`+
+			TEST_HASH_3+`\+\d+ \d+\n`+
+			TEST_HASH_2+`\+\d+ \d+$`,
+		sorted_index)
+	if err != nil {
+		t.Error(err)
+	} else if !m {
+		t.Errorf("Got index %q for empty prefix", sorted_index)
+	}
+
+	for _, prefix := range []string{"f", "f15", "f15ac"} {
+		buf = new(bytes.Buffer)
+		v.IndexTo(prefix, buf)
+		m, err := regexp.MatchString(`^`+TEST_HASH_2+`\+\d+ \d+\n$`, string(buf.Bytes()))
+		if err != nil {
+			t.Error(err)
+		} else if !m {
+			t.Errorf("Got index %q for prefix %q", string(buf.Bytes()), prefix)
+		}
+	}
+}
+
 func TestPut(t *testing.T) {
-	v := TempUnixVolume(t, false)
+	v := TempUnixVolume(t, false, false)
 	defer _teardown(v)
 
 	err := v.Put(TEST_HASH, TEST_BLOCK)
@@ -90,7 +138,7 @@ func TestPut(t *testing.T) {
 }
 
 func TestPutBadVolume(t *testing.T) {
-	v := TempUnixVolume(t, false)
+	v := TempUnixVolume(t, false, false)
 	defer _teardown(v)
 
 	os.Chmod(v.root, 000)
@@ -100,6 +148,87 @@ func TestPutBadVolume(t *testing.T) {
 	}
 }
 
+func TestUnixVolumeReadonly(t *testing.T) {
+	v := TempUnixVolume(t, false, false)
+	defer _teardown(v)
+
+	// First write something before marking readonly
+	err := v.Put(TEST_HASH, TEST_BLOCK)
+	if err != nil {
+		t.Errorf("got err %v, expected nil", err)
+	}
+
+	v.readonly = true
+
+	_, err = v.Get(TEST_HASH)
+	if err != nil {
+		t.Errorf("got err %v, expected nil", err)
+	}
+
+	err = v.Put(TEST_HASH, TEST_BLOCK)
+	if err != MethodDisabledError {
+		t.Errorf("got err %v, expected MethodDisabledError", err)
+	}
+
+	err = v.Touch(TEST_HASH)
+	if err != MethodDisabledError {
+		t.Errorf("got err %v, expected MethodDisabledError", err)
+	}
+
+	err = v.Delete(TEST_HASH)
+	if err != MethodDisabledError {
+		t.Errorf("got err %v, expected MethodDisabledError", err)
+	}
+}
+
+// TestPutTouch
+// Test that when applying PUT to a block that already exists,
+// the block's modification time is updated.
+func TestPutTouch(t *testing.T) {
+	v := TempUnixVolume(t, false, false)
+	defer _teardown(v)
+
+	if err := v.Put(TEST_HASH, TEST_BLOCK); err != nil {
+		t.Error(err)
+	}
+
+	// We'll verify { t0 < threshold < t1 }, where t0 is the
+	// existing block's timestamp on disk before Put() and t1 is
+	// its timestamp after Put().
+	threshold := time.Now().Add(-time.Second)
+
+	// Set the stored block's mtime far enough in the past that we
+	// can see the difference between "timestamp didn't change"
+	// and "timestamp granularity is too low".
+	{
+		oldtime := time.Now().Add(-20 * time.Second).Unix()
+		if err := syscall.Utime(v.blockPath(TEST_HASH),
+			&syscall.Utimbuf{oldtime, oldtime}); err != nil {
+			t.Error(err)
+		}
+
+		// Make sure v.Mtime() agrees the above Utime really worked.
+		if t0, err := v.Mtime(TEST_HASH); err != nil || t0.IsZero() || !t0.Before(threshold) {
+			t.Errorf("Setting mtime failed: %v, %v", t0, err)
+		}
+	}
+
+	// Write the same block again.
+	if err := v.Put(TEST_HASH, TEST_BLOCK); err != nil {
+		t.Error(err)
+	}
+
+	// Verify threshold < t1
+	t1, err := v.Mtime(TEST_HASH)
+	if err != nil {
+		t.Error(err)
+	}
+	if t1.Before(threshold) {
+		t.Errorf("t1 %v must be >= threshold %v after v.Put ",
+			t1, threshold)
+	}
+}
+
 // Serialization tests: launch a bunch of concurrent
 //
 // TODO(twp): show that the underlying Read/Write operations executed
@@ -116,7 +245,7 @@ func TestPutBadVolume(t *testing.T) {
 //
 func TestGetSerialized(t *testing.T) {
 	// Create a volume with I/O serialization enabled.
-	v := TempUnixVolume(t, true)
+	v := TempUnixVolume(t, true, false)
 	defer _teardown(v)
 
 	_store(t, v, TEST_HASH, TEST_BLOCK)
@@ -165,7 +294,7 @@ func TestGetSerialized(t *testing.T) {
 
 func TestPutSerialized(t *testing.T) {
 	// Create a volume with I/O serialization enabled.
-	v := TempUnixVolume(t, true)
+	v := TempUnixVolume(t, true, false)
 	defer _teardown(v)
 
 	sem := make(chan int)
@@ -194,7 +323,7 @@ func TestPutSerialized(t *testing.T) {
 	}(sem)
 
 	// Wait for all goroutines to finish
-	for done := 0; done < 2; {
+	for done := 0; done < 3; {
 		done += <-sem
 	}
 
@@ -225,7 +354,7 @@ func TestPutSerialized(t *testing.T) {
 }
 
 func TestIsFull(t *testing.T) {
-	v := TempUnixVolume(t, false)
+	v := TempUnixVolume(t, false, false)
 	defer _teardown(v)
 
 	full_path := v.root + "/full"
@@ -243,3 +372,118 @@ func TestIsFull(t *testing.T) {
 		t.Errorf("%s: should no longer be full", v)
 	}
 }
+
+func TestNodeStatus(t *testing.T) {
+	v := TempUnixVolume(t, false, false)
+	defer _teardown(v)
+
+	// Get node status and make a basic sanity check.
+	volinfo := v.Status()
+	if volinfo.MountPoint != v.root {
+		t.Errorf("GetNodeStatus mount_point %s, expected %s", volinfo.MountPoint, v.root)
+	}
+	if volinfo.DeviceNum == 0 {
+		t.Errorf("uninitialized device_num in %v", volinfo)
+	}
+	if volinfo.BytesFree == 0 {
+		t.Errorf("uninitialized bytes_free in %v", volinfo)
+	}
+	if volinfo.BytesUsed == 0 {
+		t.Errorf("uninitialized bytes_used in %v", volinfo)
+	}
+}
+
+func TestUnixVolumeGetFuncWorkerError(t *testing.T) {
+	v := TempUnixVolume(t, false, false)
+	defer _teardown(v)
+
+	v.Put(TEST_HASH, TEST_BLOCK)
+	mockErr := errors.New("Mock error")
+	err := v.getFunc(v.blockPath(TEST_HASH), func(rdr io.Reader) error {
+		return mockErr
+	})
+	if err != mockErr {
+		t.Errorf("Got %v, expected %v", err, mockErr)
+	}
+}
+
+func TestUnixVolumeGetFuncFileError(t *testing.T) {
+	v := TempUnixVolume(t, false, false)
+	defer _teardown(v)
+
+	funcCalled := false
+	err := v.getFunc(v.blockPath(TEST_HASH), func(rdr io.Reader) error {
+		funcCalled = true
+		return nil
+	})
+	if err == nil {
+		t.Errorf("Expected error opening non-existent file")
+	}
+	if funcCalled {
+		t.Errorf("Worker func should not have been called")
+	}
+}
+
+func TestUnixVolumeGetFuncWorkerWaitsOnMutex(t *testing.T) {
+	v := TempUnixVolume(t, false, false)
+	defer _teardown(v)
+
+	v.Put(TEST_HASH, TEST_BLOCK)
+
+	mtx := NewMockMutex()
+	v.locker = mtx
+
+	funcCalled := make(chan struct{})
+	go v.getFunc(v.blockPath(TEST_HASH), func(rdr io.Reader) error {
+		funcCalled <- struct{}{}
+		return nil
+	})
+	select {
+	case mtx.AllowLock <- struct{}{}:
+	case <-funcCalled:
+		t.Fatal("Function was called before mutex was acquired")
+	case <-time.After(5 * time.Second):
+		t.Fatal("Timed out before mutex was acquired")
+	}
+	select {
+	case <-funcCalled:
+	case mtx.AllowUnlock <- struct{}{}:
+		t.Fatal("Mutex was released before function was called")
+	case <-time.After(5 * time.Second):
+		t.Fatal("Timed out waiting for funcCalled")
+	}
+	select {
+	case mtx.AllowUnlock <- struct{}{}:
+	case <-time.After(5 * time.Second):
+		t.Fatal("Timed out waiting for getFunc() to release mutex")
+	}
+}
+
+func TestUnixVolumeCompare(t *testing.T) {
+	v := TempUnixVolume(t, false, false)
+	defer _teardown(v)
+
+	v.Put(TEST_HASH, TEST_BLOCK)
+	err := v.Compare(TEST_HASH, TEST_BLOCK)
+	if err != nil {
+		t.Errorf("Got err %q, expected nil", err)
+	}
+
+	err = v.Compare(TEST_HASH, []byte("baddata"))
+	if err != CollisionError {
+		t.Errorf("Got err %q, expected %q", err, CollisionError)
+	}
+
+	_store(t, v, TEST_HASH, []byte("baddata"))
+	err = v.Compare(TEST_HASH, TEST_BLOCK)
+	if err != DiskHashError {
+		t.Errorf("Got err %q, expected %q", err, DiskHashError)
+	}
+
+	p := fmt.Sprintf("%s/%s/%s", v.root, TEST_HASH[:3], TEST_HASH)
+	os.Chmod(p, 000)
+	err = v.Compare(TEST_HASH, TEST_BLOCK)
+	if err == nil || strings.Index(err.Error(), "permission denied") < 0 {
+		t.Errorf("Got err %q, expected %q", err, "permission denied")
+	}
+}