}
}
+// TestUnixVolumeWithGenericTests runs the shared generic volume test
+// suite against a plain UnixVolume created with
+// serialize = false, readonly = false.
func TestUnixVolumeWithGenericTests(t *testing.T) {
DoGenericVolumeTests(t, func(t *testing.T) TestableVolume {
return NewTestableUnixVolume(t, false, false)
})
}
-func TestUnixReadOnlyVolumeWithGenericTests(t *testing.T) {
- DoGenericReadOnlyVolumeTests(t, func(t *testing.T) TestableVolume {
+// TestUnixVolumeWithGenericTestsReadOnly runs the shared generic
+// volume test suite against a read-only UnixVolume created with
+// serialize = false, readonly = true.
+func TestUnixVolumeWithGenericTestsReadOnly(t *testing.T) {
+ DoGenericVolumeTests(t, func(t *testing.T) TestableVolume {
return NewTestableUnixVolume(t, false, true)
})
}
+// TestUnixVolumeWithGenericTestsSerialized runs the shared generic
+// volume test suite against a UnixVolume with I/O serialization
+// enabled, created with serialize = true, readonly = false.
+func TestUnixVolumeWithGenericTestsSerialized(t *testing.T) {
+ DoGenericVolumeTests(t, func(t *testing.T) TestableVolume {
+ return NewTestableUnixVolume(t, true, false)
+ })
+}
+
+// TestUnixVolumeHandlersWithGenericVolumeTests runs the generic
+// handler test suite against an RRVolumeManager backed by two
+// writable, unserialized UnixVolumes
+// (each created with serialize = false, readonly = false).
+func TestUnixVolumeHandlersWithGenericVolumeTests(t *testing.T) {
+ DoHandlersWithGenericVolumeTests(t, func(t *testing.T) (*RRVolumeManager, []TestableVolume) {
+ vols := make([]Volume, 2)
+ testableUnixVols := make([]TestableVolume, 2)
+
+ // Track each volume under both interfaces: the manager takes
+ // plain Volumes, while the test suite needs TestableVolumes so
+ // it can call the test-only helpers (e.g. PutRaw, Teardown).
+ for i := range vols {
+ v := NewTestableUnixVolume(t, false, false)
+ vols[i] = v
+ testableUnixVols[i] = v
+ }
+
+ return MakeRRVolumeManager(vols), testableUnixVols
+ })
+}
+
func TestGetNotFound(t *testing.T) {
v := NewTestableUnixVolume(t, false, false)
defer v.Teardown()
- v.Put(TEST_HASH, TEST_BLOCK)
+ v.Put(TestHash, TestBlock)
- buf, err := v.Get(TEST_HASH_2)
+ buf, err := v.Get(TestHash2)
switch {
case os.IsNotExist(err):
break
v := NewTestableUnixVolume(t, false, false)
defer v.Teardown()
- err := v.Put(TEST_HASH, TEST_BLOCK)
+ err := v.Put(TestHash, TestBlock)
if err != nil {
t.Error(err)
}
- p := fmt.Sprintf("%s/%s/%s", v.root, TEST_HASH[:3], TEST_HASH)
+ p := fmt.Sprintf("%s/%s/%s", v.root, TestHash[:3], TestHash)
if buf, err := ioutil.ReadFile(p); err != nil {
t.Error(err)
- } else if bytes.Compare(buf, TEST_BLOCK) != 0 {
+ } else if bytes.Compare(buf, TestBlock) != 0 {
t.Errorf("Write should have stored %s, did store %s",
- string(TEST_BLOCK), string(buf))
+ string(TestBlock), string(buf))
}
}
defer v.Teardown()
os.Chmod(v.root, 000)
- err := v.Put(TEST_HASH, TEST_BLOCK)
+ err := v.Put(TestHash, TestBlock)
if err == nil {
t.Error("Write should have failed")
}
v := NewTestableUnixVolume(t, false, true)
defer v.Teardown()
- v.PutRaw(TEST_HASH, TEST_BLOCK)
+ v.PutRaw(TestHash, TestBlock)
- _, err := v.Get(TEST_HASH)
+ _, err := v.Get(TestHash)
if err != nil {
t.Errorf("got err %v, expected nil", err)
}
- err = v.Put(TEST_HASH, TEST_BLOCK)
+ err = v.Put(TestHash, TestBlock)
if err != MethodDisabledError {
t.Errorf("got err %v, expected MethodDisabledError", err)
}
- err = v.Touch(TEST_HASH)
+ err = v.Touch(TestHash)
if err != MethodDisabledError {
t.Errorf("got err %v, expected MethodDisabledError", err)
}
- err = v.Delete(TEST_HASH)
+ err = v.Delete(TestHash)
if err != MethodDisabledError {
t.Errorf("got err %v, expected MethodDisabledError", err)
}
v := NewTestableUnixVolume(t, false, false)
defer v.Teardown()
- full_path := v.root + "/full"
+ fullPath := v.root + "/full"
now := fmt.Sprintf("%d", time.Now().Unix())
- os.Symlink(now, full_path)
+ os.Symlink(now, fullPath)
if !v.IsFull() {
t.Errorf("%s: claims not to be full", v)
}
- os.Remove(full_path)
+ os.Remove(fullPath)
// Test with an expired /full link.
expired := fmt.Sprintf("%d", time.Now().Unix()-3605)
- os.Symlink(expired, full_path)
+ os.Symlink(expired, fullPath)
if v.IsFull() {
t.Errorf("%s: should no longer be full", v)
}
v := NewTestableUnixVolume(t, false, false)
defer v.Teardown()
- v.Put(TEST_HASH, TEST_BLOCK)
+ v.Put(TestHash, TestBlock)
mockErr := errors.New("Mock error")
- err := v.getFunc(v.blockPath(TEST_HASH), func(rdr io.Reader) error {
+ err := v.getFunc(v.blockPath(TestHash), func(rdr io.Reader) error {
return mockErr
})
if err != mockErr {
defer v.Teardown()
funcCalled := false
- err := v.getFunc(v.blockPath(TEST_HASH), func(rdr io.Reader) error {
+ err := v.getFunc(v.blockPath(TestHash), func(rdr io.Reader) error {
funcCalled = true
return nil
})
v := NewTestableUnixVolume(t, false, false)
defer v.Teardown()
- v.Put(TEST_HASH, TEST_BLOCK)
+ v.Put(TestHash, TestBlock)
mtx := NewMockMutex()
v.locker = mtx
funcCalled := make(chan struct{})
- go v.getFunc(v.blockPath(TEST_HASH), func(rdr io.Reader) error {
+ go v.getFunc(v.blockPath(TestHash), func(rdr io.Reader) error {
funcCalled <- struct{}{}
return nil
})
v := NewTestableUnixVolume(t, false, false)
defer v.Teardown()
- v.Put(TEST_HASH, TEST_BLOCK)
- err := v.Compare(TEST_HASH, TEST_BLOCK)
+ v.Put(TestHash, TestBlock)
+ err := v.Compare(TestHash, TestBlock)
if err != nil {
t.Errorf("Got err %q, expected nil", err)
}
- err = v.Compare(TEST_HASH, []byte("baddata"))
+ err = v.Compare(TestHash, []byte("baddata"))
if err != CollisionError {
t.Errorf("Got err %q, expected %q", err, CollisionError)
}
- v.Put(TEST_HASH, []byte("baddata"))
- err = v.Compare(TEST_HASH, TEST_BLOCK)
+ v.Put(TestHash, []byte("baddata"))
+ err = v.Compare(TestHash, TestBlock)
if err != DiskHashError {
t.Errorf("Got err %q, expected %q", err, DiskHashError)
}
- p := fmt.Sprintf("%s/%s/%s", v.root, TEST_HASH[:3], TEST_HASH)
+ p := fmt.Sprintf("%s/%s/%s", v.root, TestHash[:3], TestHash)
os.Chmod(p, 000)
- err = v.Compare(TEST_HASH, TEST_BLOCK)
+ err = v.Compare(TestHash, TestBlock)
if err == nil || strings.Index(err.Error(), "permission denied") < 0 {
t.Errorf("Got err %q, expected %q", err, "permission denied")
}
}
+
+// TODO(twp): show that the underlying Read/Write operations executed
+// serially and not concurrently. The easiest way to do this is
+// probably to activate verbose or debug logging, capture log output
+// and examine it to confirm that Reads and Writes did not overlap.
+//
+// TODO(twp): a proper test of I/O serialization requires that a
+// second request start while the first one is still underway.
+// Guaranteeing that the test behaves this way requires some tricky
+// synchronization and mocking. For now we'll just launch a bunch of
+// requests simultaneously in goroutines and demonstrate that they
+// return accurate results.