// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0
	"git.curoverse.com/arvados.git/sdk/go/arvados"
	"github.com/AdRoll/goamz/s3"
	"github.com/AdRoll/goamz/s3/s3test"
	"github.com/ghodss/yaml"
	"github.com/prometheus/client_golang/prometheus"
	check "gopkg.in/check.v1"
	TestBucketName = "testbucket"
type fakeClock struct {

func (c *fakeClock) Now() time.Time {
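	// A minimal sketch of the body, inferred from how serverClock.now
	// is used later in this file: a nil now means "use the real clock",
	// while a non-nil now pins the stub server's clock to that instant.
	if c.now == nil {
		return time.Now()
	}
	return *c.now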
// Deleting isn't safe from races, but if it's turned on
// anyway we do expect it to pass the generic volume tests.
var _ = check.Suite(&StubbedS3Suite{})

type StubbedS3Suite struct {
	volumes []*TestableS3Volume
func (s *StubbedS3Suite) TestGeneric(c *check.C) {
	DoGenericVolumeTests(c, func(t TB) TestableVolume {
		// Use a negative raceWindow so s3test's 1-second
		// timestamp precision doesn't confuse fixRace.
		return s.newTestableVolume(c, -2*time.Second, false, 2)
func (s *StubbedS3Suite) TestGenericReadOnly(c *check.C) {
	DoGenericVolumeTests(c, func(t TB) TestableVolume {
		return s.newTestableVolume(c, -2*time.Second, true, 2)
func (s *StubbedS3Suite) TestIndex(c *check.C) {
	v := s.newTestableVolume(c, 0, false, 2)
	for i := 0; i < 256; i++ {
		v.PutRaw(fmt.Sprintf("%02x%030x", i, i), []byte{102, 111, 111}) // block content is "foo"
	for _, spec := range []struct {
		buf := new(bytes.Buffer)
		err := v.IndexTo(spec.prefix, buf)
		c.Check(err, check.IsNil)

		idx := bytes.SplitAfter(buf.Bytes(), []byte{10}) // split after each newline
		c.Check(len(idx), check.Equals, spec.expectMatch+1)
		c.Check(len(idx[len(idx)-1]), check.Equals, 0)
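		// For orientation: each index line is expected to look roughly
		// like "<locator>+<size> <timestamp>", e.g. (illustrative
		// values only):
		//   01000000000000000000000000000001+3 1510000000000000000
		// The checks above only count lines, so the exact timestamp
		// format doesn't matter here.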
func (s *StubbedS3Suite) TestStats(c *check.C) {
	v := s.newTestableVolume(c, 5*time.Minute, false, 2)
	stats := func() string {
		buf, err := json.Marshal(v.InternalStats())
		c.Check(err, check.IsNil)

	c.Check(stats(), check.Matches, `.*"Ops":0,.*`)

	loc := "acbd18db4cc2f85cedef654fccc4a4d8" // md5("foo")
	_, err := v.Get(context.Background(), loc, make([]byte, 3))
	c.Check(err, check.NotNil)
	c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
	c.Check(stats(), check.Matches, `.*"\*s3.Error 404 [^"]*":[^0].*`)
	c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)

	err = v.Put(context.Background(), loc, []byte("foo"))
	c.Check(err, check.IsNil)
	c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
	c.Check(stats(), check.Matches, `.*"PutOps":2,.*`) // one op for the data object, one for its "recent/" marker

	_, err = v.Get(context.Background(), loc, make([]byte, 3))
	c.Check(err, check.IsNil)
	_, err = v.Get(context.Background(), loc, make([]byte, 3))
	c.Check(err, check.IsNil)
	c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
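	// Rough shape of the InternalStats() JSON being matched above
	// (illustrative only; counters that are not asserted on are
	// elided):
	//   {..., "Ops":<nonzero>, "PutOps":2, "InBytes":6, "OutBytes":3,
	//    "*s3.Error 404 ...":1, ...}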
// blockingHandler is a stub HTTP backend for the context-cancellation
// tests below: it reports each incoming request on the requested
// channel (if set), blocks until the unblock channel is closed (if
// set), and then responds 404.
type blockingHandler struct {
	requested chan *http.Request
	unblock   chan struct{}

func (h *blockingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if h.requested != nil {
	if h.unblock != nil {
	http.Error(w, "nothing here", http.StatusNotFound)
func (s *StubbedS3Suite) TestGetContextCancel(c *check.C) {
	loc := "acbd18db4cc2f85cedef654fccc4a4d8"
	buf := make([]byte, 3)

	s.testContextCancel(c, func(ctx context.Context, v *TestableS3Volume) error {
		_, err := v.Get(ctx, loc, buf)
func (s *StubbedS3Suite) TestCompareContextCancel(c *check.C) {
	loc := "acbd18db4cc2f85cedef654fccc4a4d8"

	s.testContextCancel(c, func(ctx context.Context, v *TestableS3Volume) error {
		return v.Compare(ctx, loc, buf)
func (s *StubbedS3Suite) TestPutContextCancel(c *check.C) {
	loc := "acbd18db4cc2f85cedef654fccc4a4d8"

	s.testContextCancel(c, func(ctx context.Context, v *TestableS3Volume) error {
		return v.Put(ctx, loc, buf)
func (s *StubbedS3Suite) testContextCancel(c *check.C, testFunc func(context.Context, *TestableS3Volume) error) {
	handler := &blockingHandler{}
	srv := httptest.NewServer(handler)

	v := s.newTestableVolume(c, 5*time.Minute, false, 2)
	// Point the volume's endpoint at the blocking stub server.
	vol.Endpoint = srv.URL
	v = &TestableS3Volume{S3Volume: &vol}
	metrics := newVolumeMetricsVecs(prometheus.NewRegistry())

	ctx, cancel := context.WithCancel(context.Background())

	handler.requested = make(chan *http.Request)
	handler.unblock = make(chan struct{})
	defer close(handler.unblock)

	doneFunc := make(chan struct{})
		err := testFunc(ctx, v)
		c.Check(err, check.Equals, context.Canceled)

	timeout := time.After(10 * time.Second)

	// Wait for the stub server to receive a request, meaning the
	// test func is waiting on an s3 operation.
		c.Fatal("timed out waiting for test func to call our handler")
		c.Fatal("test func finished without even calling our handler!")
	case <-handler.requested:
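		// The stub has received the request, i.e. testFunc is now
		// blocked inside an S3 operation. From here the test cancels
		// ctx and expects testFunc to return context.Canceled promptly,
		// without waiting for handler.unblock to be closed (that only
		// happens via the deferred close above).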
func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
	defer func(tl, bs arvados.Duration) {
		theConfig.TrashLifetime = tl
		theConfig.BlobSignatureTTL = bs
	}(theConfig.TrashLifetime, theConfig.BlobSignatureTTL)
	theConfig.TrashLifetime.Set("1h")
	theConfig.BlobSignatureTTL.Set("1h")

	v := s.newTestableVolume(c, 5*time.Minute, false, 2)

	// putS3Obj writes an object directly to the stub bucket, with the
	// server clock pinned to the given fake timestamp.
	putS3Obj := func(t time.Time, key string, data []byte) {
		v.serverClock.now = &t
		v.bucket.Put(key, data, "application/octet-stream", s3ACL, s3.Options{})
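		// Keys written by these fixtures mirror the volume's own
		// layout: "<loc>" holds block data, "recent/<loc>" is a
		// zero-length timestamp marker, and "trash/<loc>" is a trashed
		// copy of the block (see setupScenario below).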
	for _, scenario := range []struct {
		canGetAfterTrash    bool
		haveTrashAfterEmpty bool
			"No related objects",
			false, false, false, false, false, false,

			// Stored by older version, or there was a
			// race between EmptyTrash and Put: Trash is a
			// no-op even though the data object is very old.
			t0.Add(-48 * time.Hour), none, none,
			true, true, true, false, false, false,

			"Not trash, but old enough to be eligible for trash",
			t0.Add(-24 * time.Hour), t0.Add(-2 * time.Hour), none,
			true, true, false, false, false, false,

			"Not trash, and not old enough to be eligible for trash",
			t0.Add(-24 * time.Hour), t0.Add(-30 * time.Minute), none,
			true, true, true, false, false, false,

			"Trashed + untrashed copies exist, due to recent race between Trash and Put",
			t0.Add(-24 * time.Hour), t0.Add(-3 * time.Minute), t0.Add(-2 * time.Minute),
			true, true, true, true, true, false,

			"Trashed + untrashed copies exist, trash nearly eligible for deletion: prone to Trash race",
			t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour), t0.Add(-59 * time.Minute),
			true, false, true, true, true, false,

			"Trashed + untrashed copies exist, trash is eligible for deletion: prone to Trash race",
			t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour), t0.Add(-61 * time.Minute),
			true, false, true, true, false, false,

			"Trashed + untrashed copies exist, due to old race between Put and unfinished Trash: emptying trash is unsafe",
			t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour), t0.Add(-12 * time.Hour),
			true, false, true, true, true, true,

			"Trashed + untrashed copies exist, used to be unsafe to empty, but since made safe by fixRace+Touch",
			t0.Add(-time.Second), t0.Add(-time.Second), t0.Add(-12 * time.Hour),
			true, true, true, true, false, false,

			"Trashed + untrashed copies exist because Trash operation was interrupted (no race)",
			t0.Add(-24 * time.Hour), t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour),
			true, false, true, true, false, false,

			"Trash, not yet eligible for deletion",
			none, t0.Add(-12 * time.Hour), t0.Add(-time.Minute),
			false, false, false, true, true, false,

			"Trash, not yet eligible for deletion, prone to races",
			none, t0.Add(-12 * time.Hour), t0.Add(-59 * time.Minute),
			false, false, false, true, true, false,

			"Trash, eligible for deletion",
			none, t0.Add(-12 * time.Hour), t0.Add(-2 * time.Hour),
			false, false, false, true, false, false,

			"Erroneously trashed during a race, detected before TrashLifetime",
			none, t0.Add(-30 * time.Minute), t0.Add(-29 * time.Minute),
			true, false, true, true, true, false,

			"Erroneously trashed during a race, rescue during EmptyTrash despite reaching TrashLifetime",
			none, t0.Add(-90 * time.Minute), t0.Add(-89 * time.Minute),
			true, false, true, true, true, false,

			"Trashed copy exists with no recent/* marker (cause unknown); repair by untrashing",
			none, none, t0.Add(-time.Minute),
			false, false, false, true, true, true,
		c.Log("Scenario: ", scenario.label)

		// We have a few tests to run for each scenario, and
		// the tests are expected to change state. By calling
		// this setup func between tests, we (re)create the
		// scenario as specified, using a new unique block
		// locator to prevent interference from previous tests.
		setupScenario := func() (string, []byte) {
			blk := []byte(fmt.Sprintf("%d", nextKey))
			loc := fmt.Sprintf("%x", md5.Sum(blk))
			putS3Obj(scenario.dataT, loc, blk)
			putS3Obj(scenario.recentT, "recent/"+loc, nil)
			putS3Obj(scenario.trashT, "trash/"+loc, blk)
			// Reset the server clock to "now" before the test steps run.
			v.serverClock.now = &t0
		// Check canGet
		loc, blk := setupScenario()
		buf := make([]byte, len(blk))
		_, err := v.Get(context.Background(), loc, buf)
		c.Check(err == nil, check.Equals, scenario.canGet)
			c.Check(os.IsNotExist(err), check.Equals, true)

		// Call Trash, then check canTrash and canGetAfterTrash
		loc, _ = setupScenario()
		c.Check(err == nil, check.Equals, scenario.canTrash)
		_, err = v.Get(context.Background(), loc, buf)
		c.Check(err == nil, check.Equals, scenario.canGetAfterTrash)
			c.Check(os.IsNotExist(err), check.Equals, true)
		// Call Untrash, then check canUntrash
		loc, _ = setupScenario()
		c.Check(err == nil, check.Equals, scenario.canUntrash)
		if scenario.dataT != none || scenario.trashT != none {
			// In all scenarios where the data exists, we
			// should be able to Get after Untrash --
			// regardless of timestamps, errors, or race
			// conditions.
			_, err = v.Get(context.Background(), loc, buf)
			c.Check(err, check.IsNil)
		// Call EmptyTrash, then check haveTrashAfterEmpty and
		// freshAfterEmpty
		loc, _ = setupScenario()
		_, err = v.bucket.Head("trash/"+loc, nil)
		c.Check(err == nil, check.Equals, scenario.haveTrashAfterEmpty)
		if scenario.freshAfterEmpty {
			t, err := v.Mtime(loc)
			c.Check(err, check.IsNil)
			// new mtime must be current (with an
			// allowance for 1s timestamp precision)
			c.Check(t.After(t0.Add(-time.Second)), check.Equals, true)
		// Check for current Mtime after Put (applies to all
		// scenarios)
		loc, blk = setupScenario()
		err = v.Put(context.Background(), loc, blk)
		c.Check(err, check.IsNil)
		t, err := v.Mtime(loc)
		c.Check(err, check.IsNil)
		c.Check(t.After(t0.Add(-time.Second)), check.Equals, true)
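// TestableS3Volume is an S3Volume wired up to an in-process s3test stub
// server whose clock can be manipulated through serverClock, so the
// tests above can create data objects, "recent/" markers, and "trash/"
// copies with arbitrary timestamps.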
type TestableS3Volume struct {
	server      *s3test.Server
	serverClock *fakeClock
func (s *StubbedS3Suite) newTestableVolume(c *check.C, raceWindow time.Duration, readonly bool, replication int) *TestableS3Volume {
	clock := &fakeClock{}
	srv, err := s3test.NewServer(&s3test.Config{Clock: clock})
	c.Assert(err, check.IsNil)

	v := &TestableS3Volume{
			Bucket:             TestBucketName,
			Region:             "test-region-1",
			LocationConstraint: true,
			RaceWindow:         arvados.Duration(raceWindow),
			S3Replication:      replication,
			UnsafeDelete:       s3UnsafeDelete,
	metrics := newVolumeMetricsVecs(prometheus.NewRegistry())

	err = v.bucket.PutBucket(s3.ACL("private"))
	c.Assert(err, check.IsNil)
func (s *StubbedS3Suite) TestConfig(c *check.C) {
	err := yaml.Unmarshal([]byte(`
StorageClasses: ["class_a", "class_b"]
	c.Check(err, check.IsNil)
	c.Check(cfg.Volumes[0].GetStorageClasses(), check.DeepEquals, []string{"class_a", "class_b"})
// Start writes throwaway credential files and starts the wrapped
// S3Volume.
func (v *TestableS3Volume) Start(vm *volumeMetricsVecs) error {
	tmp, err := ioutil.TempFile("", "keepstore")
	v.c.Assert(err, check.IsNil)
	defer os.Remove(tmp.Name())
	_, err = tmp.Write([]byte("xxx\n"))
	v.c.Assert(err, check.IsNil)
	v.c.Assert(tmp.Close(), check.IsNil)

	v.S3Volume.AccessKeyFile = tmp.Name()
	v.S3Volume.SecretKeyFile = tmp.Name()

	v.c.Assert(v.S3Volume.Start(vm), check.IsNil)
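	// The dummy "xxx" credentials suffice here because the s3test stub
	// server presumably doesn't validate them; the point is to exercise
	// S3Volume.Start's key-file loading path.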
// PutRaw writes a block (and its "recent/" marker) directly to the
// bucket, skipping the ContentMD5 check that the normal Put path
// performs.
func (v *TestableS3Volume) PutRaw(loc string, block []byte) {
	err := v.bucket.Put(loc, block, "application/octet-stream", s3ACL, s3.Options{})
		log.Printf("PutRaw: %s: %+v", loc, err)
	err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
		log.Printf("PutRaw: recent/%s: %+v", loc, err)
// TouchWithDate turns back the clock while doing a Touch(). We assume
// there are no other operations happening on the same s3test server
// while we do this.
func (v *TestableS3Volume) TouchWithDate(locator string, lastPut time.Time) {
	v.serverClock.now = &lastPut
	err := v.bucket.Put("recent/"+locator, nil, "application/octet-stream", s3ACL, s3.Options{})
	// Resume using the real clock.
	v.serverClock.now = nil
func (v *TestableS3Volume) Teardown() {

func (v *TestableS3Volume) ReadWriteOperationLabelValues() (r, w string) {