// TestableUnixVolume wraps UnixVolume with test-only helpers
// (PutRaw, TouchWithDate, Teardown) so it can satisfy the
// TestableVolume interface used by DoGenericVolumeTests.
// NOTE(review): the struct body is elided in this view — it
// presumably embeds UnixVolume and holds the *testing.T; confirm
// against the full source.
17 type TestableUnixVolume struct {
// NewTestableUnixVolume returns a TestableUnixVolume rooted in a
// fresh temporary directory. When serialize is true a sync.Mutex is
// installed as the volume locker so I/O is serialized; readonly is
// presumably copied onto the UnixVolume (elided lines prevent
// confirming — TODO verify). Callers are responsible for invoking
// Teardown to remove the temporary directory.
22 func NewTestableUnixVolume(t *testing.T, serialize bool, readonly bool) *TestableUnixVolume {
23 d, err := ioutil.TempDir("", "volume_test")
// locker stays nil unless serialize is requested; the elided lines
// between here and the assignment presumably gate it on serialize.
27 var locker sync.Locker
29 locker = &sync.Mutex{}
31 return &TestableUnixVolume{
32 UnixVolume: UnixVolume{
41 // PutRaw writes a Keep block directly into a UnixVolume, even if
42 // the volume is readonly.
// The deferred closure (body elided in this view) presumably saves
// and restores the volume's readonly flag around the Put call —
// TODO confirm against the full source.
43 func (v *TestableUnixVolume) PutRaw(locator string, data []byte) {
44 defer func(orig bool) {
48 err := v.Put(locator, data)
// TouchWithDate sets both the access and modification times of the
// stored block identified by locator to lastPut, via syscall.Utime.
// Used by tests that need to simulate blocks of a particular age.
// NOTE(review): syscall.Utimbuf is initialized positionally here;
// named fields (Actime/Modtime) would be more robust to struct
// changes. Error handling for Utime is elided in this view.
54 func (v *TestableUnixVolume) TouchWithDate(locator string, lastPut time.Time) {
55 err := syscall.Utime(v.blockPath(locator), &syscall.Utimbuf{lastPut.Unix(), lastPut.Unix()})
// Teardown removes the volume's temporary root directory and all of
// its contents. The error branch (elided) presumably reports the
// failure via the testing.T captured at construction — TODO confirm.
61 func (v *TestableUnixVolume) Teardown() {
62 if err := os.RemoveAll(v.root); err != nil {
67 // serialize = false; readonly = false
// Runs the shared generic volume test suite against a plain
// (non-serialized, writable) UnixVolume.
68 func TestUnixVolumeWithGenericTests(t *testing.T) {
69 DoGenericVolumeTests(t, func(t *testing.T) TestableVolume {
70 return NewTestableUnixVolume(t, false, false)
74 // serialize = false; readonly = true
// Runs the shared generic volume test suite against a read-only
// UnixVolume.
75 func TestUnixVolumeWithGenericTestsReadOnly(t *testing.T) {
76 DoGenericVolumeTests(t, func(t *testing.T) TestableVolume {
77 return NewTestableUnixVolume(t, false, true)
81 // serialize = true; readonly = false
// Runs the shared generic volume test suite against a UnixVolume
// whose I/O is serialized through a mutex locker.
82 func TestUnixVolumeWithGenericTestsSerialized(t *testing.T) {
83 DoGenericVolumeTests(t, func(t *testing.T) TestableVolume {
84 return NewTestableUnixVolume(t, true, false)
// TestGetNotFound verifies that Get on a hash that was never stored
// fails with a not-exist error rather than returning data or some
// other error type.
88 func TestGetNotFound(t *testing.T) {
89 v := NewTestableUnixVolume(t, false, false)
// NOTE(review): the error return of Put is ignored here; a Put
// failure would make the later Get result less meaningful.
91 v.Put(TEST_HASH, TEST_BLOCK)
// Fetch a different hash than the one stored; this must fail.
93 buf, err := v.Get(TEST_HASH_2)
95 case os.IsNotExist(err):
98 t.Errorf("Read should have failed, returned %s", string(buf))
100 t.Errorf("Read expected ErrNotExist, got: %s", err)
// TestPut verifies that Put stores a block at the expected on-disk
// path (root/<first 3 hash chars>/<hash>) with exactly the bytes
// that were written.
104 func TestPut(t *testing.T) {
105 v := NewTestableUnixVolume(t, false, false)
108 err := v.Put(TEST_HASH, TEST_BLOCK)
// Read the block back through the filesystem, bypassing Get, to
// confirm the on-disk layout and contents.
112 p := fmt.Sprintf("%s/%s/%s", v.root, TEST_HASH[:3], TEST_HASH)
113 if buf, err := ioutil.ReadFile(p); err != nil {
// NOTE(review): bytes.Compare(a, b) != 0 is more idiomatically
// written !bytes.Equal(a, b) (staticcheck S1004).
115 } else if bytes.Compare(buf, TEST_BLOCK) != 0 {
116 t.Errorf("Write should have stored %s, did store %s",
117 string(TEST_BLOCK), string(buf))
// TestPutBadVolume verifies that Put fails when the volume root is
// made unwritable.
121 func TestPutBadVolume(t *testing.T) {
122 v := NewTestableUnixVolume(t, false, false)
// NOTE(review): os.Chmod's error is ignored, and mode 000 does not
// stop root — this test would pass trivially (or misbehave) when
// run as root. The literal 000 also reads as octal only by
// convention; 0o000 would be explicit.
125 os.Chmod(v.root, 000)
126 err := v.Put(TEST_HASH, TEST_BLOCK)
128 t.Error("Write should have failed")
// TestUnixVolumeReadonly verifies that on a read-only volume:
// Get succeeds for a block injected via PutRaw, while Put, Touch,
// and Delete all fail with MethodDisabledError.
132 func TestUnixVolumeReadonly(t *testing.T) {
133 v := NewTestableUnixVolume(t, false, true)
// PutRaw bypasses the readonly flag to seed the volume with data.
136 v.PutRaw(TEST_HASH, TEST_BLOCK)
// Reads must still work on a readonly volume.
138 _, err := v.Get(TEST_HASH)
140 t.Errorf("got err %v, expected nil", err)
// All mutating operations must be rejected with MethodDisabledError.
143 err = v.Put(TEST_HASH, TEST_BLOCK)
144 if err != MethodDisabledError {
145 t.Errorf("got err %v, expected MethodDisabledError", err)
148 err = v.Touch(TEST_HASH)
149 if err != MethodDisabledError {
150 t.Errorf("got err %v, expected MethodDisabledError", err)
153 err = v.Delete(TEST_HASH)
154 if err != MethodDisabledError {
155 t.Errorf("got err %v, expected MethodDisabledError", err)
// TestIsFull verifies the volume's "full" marker mechanism: a
// root/full symlink whose target is a recent Unix timestamp marks
// the volume full, and a timestamp older than (apparently) one hour
// does not.
159 func TestIsFull(t *testing.T) {
160 v := NewTestableUnixVolume(t, false, false)
// Create a /full link pointing at the current time: volume should
// report itself full.
// NOTE(review): both os.Symlink error returns in this test are
// ignored; a failed symlink would silently invalidate the assertions.
163 fullPath := v.root + "/full"
164 now := fmt.Sprintf("%d", time.Now().Unix())
165 os.Symlink(now, fullPath)
167 t.Errorf("%s: claims not to be full", v)
171 // Test with an expired /full link.
// 3605 seconds = just over one hour ago, presumably past the
// marker's expiry window — TODO confirm the window in IsFull.
172 expired := fmt.Sprintf("%d", time.Now().Unix()-3605)
173 os.Symlink(expired, fullPath)
175 t.Errorf("%s: should no longer be full", v)
// TestNodeStatus sanity-checks the volume Status() report: the mount
// point must match the volume root, and device number, bytes free,
// and bytes used must all be initialized (non-zero).
179 func TestNodeStatus(t *testing.T) {
180 v := NewTestableUnixVolume(t, false, false)
183 // Get node status and make a basic sanity check.
184 volinfo := v.Status()
185 if volinfo.MountPoint != v.root {
186 t.Errorf("GetNodeStatus mount_point %s, expected %s", volinfo.MountPoint, v.root)
188 if volinfo.DeviceNum == 0 {
189 t.Errorf("uninitialized device_num in %v", volinfo)
// NOTE(review): a legitimately 100%-full or 100%-empty filesystem
// would make BytesFree or BytesUsed zero and fail these checks;
// acceptable for a sanity test, but worth knowing when it flakes.
191 if volinfo.BytesFree == 0 {
192 t.Errorf("uninitialized bytes_free in %v", volinfo)
194 if volinfo.BytesUsed == 0 {
195 t.Errorf("uninitialized bytes_used in %v", volinfo)
// TestUnixVolumeGetFuncWorkerError verifies that getFunc propagates
// an error returned by the worker function back to its caller.
199 func TestUnixVolumeGetFuncWorkerError(t *testing.T) {
200 v := NewTestableUnixVolume(t, false, false)
// NOTE(review): Put's error return is ignored here.
203 v.Put(TEST_HASH, TEST_BLOCK)
204 mockErr := errors.New("Mock error")
// The worker (body elided in this view) presumably returns mockErr,
// which getFunc must hand back unchanged.
205 err := v.getFunc(v.blockPath(TEST_HASH), func(rdr io.Reader) error {
209 t.Errorf("Got %v, expected %v", err, mockErr)
// TestUnixVolumeGetFuncFileError verifies that when the target file
// does not exist, getFunc returns an error and never invokes the
// worker function.
213 func TestUnixVolumeGetFuncFileError(t *testing.T) {
214 v := NewTestableUnixVolume(t, false, false)
// No block is stored, so blockPath(TEST_HASH) points at a
// non-existent file. The worker (elided) presumably sets a flag
// checked by the second Errorf below.
218 err := v.getFunc(v.blockPath(TEST_HASH), func(rdr io.Reader) error {
223 t.Errorf("Expected error opening non-existent file")
226 t.Errorf("Worker func should not have been called")
// TestUnixVolumeGetFuncWorkerWaitsOnMutex verifies the lock ordering
// of getFunc using a MockMutex: the serialize mutex must be acquired
// before the worker function runs, and released only after the
// worker returns. The mock's AllowLock/AllowUnlock channels gate
// each Lock/Unlock call so the test can observe the ordering.
230 func TestUnixVolumeGetFuncWorkerWaitsOnMutex(t *testing.T) {
231 v := NewTestableUnixVolume(t, false, false)
234 v.Put(TEST_HASH, TEST_BLOCK)
// Install the mock as the volume locker (installation line elided).
236 mtx := NewMockMutex()
// funcCalled signals when getFunc invokes the worker.
// NOTE(review): unbuffered — the goroutine blocks until the main
// test goroutine receives from it, which the selects below rely on.
239 funcCalled := make(chan struct{})
240 go v.getFunc(v.blockPath(TEST_HASH), func(rdr io.Reader) error {
241 funcCalled <- struct{}{}
// Step 1: allow the Lock call. Receiving on funcCalled first
// (elided case) would mean the worker ran before the lock was taken.
245 case mtx.AllowLock <- struct{}{}:
247 t.Fatal("Function was called before mutex was acquired")
248 case <-time.After(5 * time.Second):
249 t.Fatal("Timed out before mutex was acquired")
// Step 2: the worker must run before Unlock is attempted; being
// able to send on AllowUnlock here would mean an early release.
253 case mtx.AllowUnlock <- struct{}{}:
254 t.Fatal("Mutex was released before function was called")
255 case <-time.After(5 * time.Second):
256 t.Fatal("Timed out waiting for funcCalled")
// Step 3: after the worker has run, getFunc must release the mutex.
259 case mtx.AllowUnlock <- struct{}{}:
260 case <-time.After(5 * time.Second):
261 t.Fatal("Timed out waiting for getFunc() to release mutex")
// TestUnixVolumeCompare exercises Compare's three failure axes:
// matching data returns nil; mismatched expected data returns
// CollisionError; on-disk data that fails its own hash returns
// DiskHashError; and an unreadable block file surfaces the
// underlying "permission denied" error.
265 func TestUnixVolumeCompare(t *testing.T) {
266 v := NewTestableUnixVolume(t, false, false)
// Case 1: stored data matches the expected bytes.
269 v.Put(TEST_HASH, TEST_BLOCK)
270 err := v.Compare(TEST_HASH, TEST_BLOCK)
272 t.Errorf("Got err %q, expected nil", err)
// Case 2: caller's expected bytes differ from (valid) stored data.
275 err = v.Compare(TEST_HASH, []byte("baddata"))
276 if err != CollisionError {
277 t.Errorf("Got err %q, expected %q", err, CollisionError)
// Case 3: stored bytes no longer match their own locator hash.
280 v.Put(TEST_HASH, []byte("baddata"))
281 err = v.Compare(TEST_HASH, TEST_BLOCK)
282 if err != DiskHashError {
283 t.Errorf("Got err %q, expected %q", err, DiskHashError)
// Case 4: block file made unreadable (the chmod call is elided
// between these lines); Compare must report the OS error.
286 p := fmt.Sprintf("%s/%s/%s", v.root, TEST_HASH[:3], TEST_HASH)
288 err = v.Compare(TEST_HASH, TEST_BLOCK)
// NOTE(review): strings.Index(s, sub) < 0 is more idiomatically
// written !strings.Contains(s, sub) (staticcheck S1003). Matching
// on error text is also brittle; os.IsPermission(err) would be
// more robust if the error is not wrapped.
289 if err == nil || strings.Index(err.Error(), "permission denied") < 0 {
290 t.Errorf("Got err %q, expected %q", err, "permission denied")
294 // TODO(twp): show that the underlying Read/Write operations executed
295 // serially and not concurrently. The easiest way to do this is
296 // probably to activate verbose or debug logging, capture log output
297 // and examine it to confirm that Reads and Writes did not overlap.
299 // TODO(twp): a proper test of I/O serialization requires that a
300 // second request start while the first one is still underway.
301 // Guaranteeing that the test behaves this way requires some tricky
302 // synchronization and mocking. For now we'll just launch a bunch of
303 requests simultaneously in goroutines and demonstrate that they
304 // return accurate results.