// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package keepstore

import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"os"
	"strings"
	"sync/atomic"
	"time"

	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/ctxlog"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"

	"github.com/johannesboyne/gofakes3"
	"github.com/johannesboyne/gofakes3/backend/s3mem"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/sirupsen/logrus"
	check "gopkg.in/check.v1"
)
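
// s3fakeClock is the time source used by the gofakes3 stub backend.
// Tests can pin its notion of "now" to an arbitrary time, so objects
// written to the stub S3 server get back-dated timestamps.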
type s3fakeClock struct {
	now *time.Time
}

func (c *s3fakeClock) Now() time.Time {
	if c.now == nil {
		return time.Now().UTC()
	}
	return c.now.UTC()
}

func (c *s3fakeClock) Since(t time.Time) time.Duration {
	return c.Now().Sub(t)
}

var _ = check.Suite(&stubbedS3Suite{})

var srv httptest.Server
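
// stubbedS3Suite exercises the S3 volume driver against an in-process
// gofakes3 server (and, for the IAM-credential tests, a stubbed EC2
// instance metadata server) instead of real S3.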
type stubbedS3Suite struct {
	s3server    *httptest.Server
	s3fakeClock *s3fakeClock
	metadata    *httptest.Server
	cluster     *arvados.Cluster
	volumes     []*testableS3Volume
}

func (s *stubbedS3Suite) SetUpTest(c *check.C) {
	s.s3fakeClock = &s3fakeClock{}
	s.cluster = testCluster(c)
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-000000000000000": {Driver: "S3"},
		"zzzzz-nyw5e-111111111111111": {Driver: "S3"},
	}
}

func (s *stubbedS3Suite) TearDownTest(c *check.C) {
	if s.s3server != nil {
		s.s3server.Close()
	}
}

func (s *stubbedS3Suite) TestGeneric(c *check.C) {
	DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
		// Use a negative raceWindow so the stub server's 1-second
		// timestamp precision doesn't confuse fixRace.
		return s.newTestableVolume(c, params, -2*time.Second)
	})
}

func (s *stubbedS3Suite) TestGenericReadOnly(c *check.C) {
	DoGenericVolumeTests(c, true, func(t TB, params newVolumeParams) TestableVolume {
		return s.newTestableVolume(c, params, -2*time.Second)
	})
}

func (s *stubbedS3Suite) TestGenericWithPrefix(c *check.C) {
	DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
		v := s.newTestableVolume(c, params, -2*time.Second)

func (s *stubbedS3Suite) TestIndex(c *check.C) {
	v := s.newTestableVolume(c, newVolumeParams{
		ConfigVolume: arvados.Volume{Replication: 2},
		MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
		BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
	for i := 0; i < 256; i++ {
		err := v.blockWriteWithoutMD5Check(fmt.Sprintf("%02x%030x", i, i), []byte{102, 111, 111})
		c.Assert(err, check.IsNil)
	}
	for _, spec := range []struct {
		buf := new(bytes.Buffer)
		err := v.Index(context.Background(), spec.prefix, buf)
		c.Check(err, check.IsNil)

		idx := bytes.SplitAfter(buf.Bytes(), []byte{10})
		c.Check(len(idx), check.Equals, spec.expectMatch+1)
		c.Check(len(idx[len(idx)-1]), check.Equals, 0)
	}
}
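
// TestSignature confirms that requests sent to the S3 endpoint are
// signed with AWS Signature Version 4.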
func (s *stubbedS3Suite) TestSignature(c *check.C) {
	var header http.Header
	stub := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

	// The aws-sdk-go-v2 driver only supports S3 V4 signatures. S3 V2
	// signatures were phased out as of June 24, 2020. Cf.
	// https://forums.aws.amazon.com/ann.jspa?annID=5816
		S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
			SecretAccessKey: "xxx",
			Region:          "test-region-1",
			Bucket:          "test-bucket-name",
		logger:  ctxlog.TestLogger(c),
		metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
	// Our test S3 server uses the older path-style addressing.
	vol.usePathStyle = true
	c.Check(err, check.IsNil)
	err = vol.BlockWrite(context.Background(), "acbd18db4cc2f85cedef654fccc4a4d8", []byte("foo"))
	c.Check(err, check.IsNil)
	c.Check(header.Get("Authorization"), check.Matches, `AWS4-HMAC-SHA256 .*`)
}
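
// TestIAMRoleCredentials confirms that, when no static AccessKeyID and
// SecretAccessKey are configured, the driver retrieves temporary
// credentials from the EC2 instance metadata service and uses them to
// sign requests -- and that a metadata server with no IAM role produces
// an error rather than unsigned requests.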
func (s *stubbedS3Suite) TestIAMRoleCredentials(c *check.C) {
	var reqHeader http.Header
	stub := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

	retrievedMetadata := false
	s.metadata = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		retrievedMetadata = true
		upd := time.Now().UTC().Add(-time.Hour).Format(time.RFC3339)
		exp := time.Now().UTC().Add(time.Hour).Format(time.RFC3339)
		c.Logf("metadata stub received request: %s %s", r.Method, r.URL.Path)
		switch {
		case r.URL.Path == "/latest/meta-data/iam/security-credentials/":
			io.WriteString(w, "testcredential\n")
		case r.URL.Path == "/latest/api/token",
			r.URL.Path == "/latest/meta-data/iam/security-credentials/testcredential":
			// Literal example from
			// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials
			// but with updated timestamps
			io.WriteString(w, `{"Code":"Success","LastUpdated":"`+upd+`","Type":"AWS-HMAC","AccessKeyId":"ASIAIOSFODNN7EXAMPLE","SecretAccessKey":"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY","Token":"token","Expiration":"`+exp+`"}`)
		default:
			w.WriteHeader(http.StatusNotFound)
	defer s.metadata.Close()

		S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
			Region: "test-region-1",
			Bucket: "test-bucket-name",
		logger:  ctxlog.TestLogger(c),
		metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
	err := v.check(s.metadata.URL + "/latest")
	c.Check(err, check.IsNil)
	resp, err := v.bucket.svc.ListBuckets(context.Background(), &s3.ListBucketsInput{})
	c.Check(err, check.IsNil)
	c.Check(resp.Buckets, check.HasLen, 0)
	c.Check(retrievedMetadata, check.Equals, true)
	c.Check(reqHeader.Get("Authorization"), check.Matches, `AWS4-HMAC-SHA256 Credential=ASIAIOSFODNN7EXAMPLE/\d+/test-region-1/s3/aws4_request, SignedHeaders=.*`)

	retrievedMetadata = false
	s.metadata = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		retrievedMetadata = true
		c.Logf("metadata stub received request: %s %s", r.Method, r.URL.Path)
		w.WriteHeader(http.StatusNotFound)

		S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
			Endpoint: "http://localhost:9",
			Region:   "test-region-1",
			Bucket:   "test-bucket-name",
		logger:  ctxlog.TestLogger(c),
		metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
	err = deadv.check(s.metadata.URL + "/latest")
	c.Check(err, check.IsNil)
	_, err = deadv.bucket.svc.ListBuckets(context.Background(), &s3.ListBucketsInput{})
	c.Check(err, check.ErrorMatches, `(?s).*failed to refresh cached credentials, no EC2 IMDS role found.*`)
	c.Check(err, check.ErrorMatches, `(?s).*404.*`)
	c.Check(retrievedMetadata, check.Equals, true)
}
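
// TestStats checks that the volume's internal stats counters (operation
// counts, per-error-type counts, and bytes in/out) are updated by read
// and write operations.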
func (s *stubbedS3Suite) TestStats(c *check.C) {
	v := s.newTestableVolume(c, newVolumeParams{
		ConfigVolume: arvados.Volume{Replication: 2},
		MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
		BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
	stats := func() string {
		buf, err := json.Marshal(v.InternalStats())
		c.Check(err, check.IsNil)
		return string(buf)
	}

	c.Check(stats(), check.Matches, `.*"Ops":0,.*`)

	loc := "acbd18db4cc2f85cedef654fccc4a4d8"
	err := v.BlockRead(context.Background(), loc, brdiscard)
	c.Check(err, check.NotNil)
	c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
	c.Check(stats(), check.Matches, `.*"\*smithy.OperationError 404 NoSuchKey":[^0].*`)
	c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)

	err = v.BlockWrite(context.Background(), loc, []byte("foo"))
	c.Check(err, check.IsNil)
	c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
	c.Check(stats(), check.Matches, `.*"PutOps":2,.*`)

	err = v.BlockRead(context.Background(), loc, brdiscard)
	c.Check(err, check.IsNil)
	err = v.BlockRead(context.Background(), loc, brdiscard)
	c.Check(err, check.IsNil)
	c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
}
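
// s3AWSBlockingHandler is a stub S3 server handler that accepts bucket
// creation, reports every other incoming request on the requested
// channel, and then blocks until the unblock channel is closed. The
// context-cancellation tests use it to cancel a request while it is
// still in flight.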
type s3AWSBlockingHandler struct {
	requested chan *http.Request
	unblock   chan struct{}
}

func (h *s3AWSBlockingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.Method == "PUT" && !strings.Contains(strings.Trim(r.URL.Path, "/"), "/") {
		// Accept PutBucket ("PUT /bucketname/"), called by
		// newTestableVolume when it creates the test bucket.
		return
	}
	if h.requested != nil {
		h.requested <- r
	}
	if h.unblock != nil {
		<-h.unblock
	}
	http.Error(w, "nothing here", http.StatusNotFound)
}

func (s *stubbedS3Suite) TestGetContextCancel(c *check.C) {
	s.testContextCancel(c, func(ctx context.Context, v *testableS3Volume) error {
		return v.BlockRead(ctx, fooHash, brdiscard)
	})
}

func (s *stubbedS3Suite) TestPutContextCancel(c *check.C) {
	s.testContextCancel(c, func(ctx context.Context, v *testableS3Volume) error {
		return v.BlockWrite(ctx, fooHash, []byte("foo"))
	})
}
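
// testContextCancel runs testFunc against a blocking stub S3 server,
// cancels the context once the request has reached the server, and
// checks that testFunc returns context.Canceled.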
func (s *stubbedS3Suite) testContextCancel(c *check.C, testFunc func(context.Context, *testableS3Volume) error) {
	handler := &s3AWSBlockingHandler{}
	s.s3server = httptest.NewServer(handler)
	defer s.s3server.Close()

	v := s.newTestableVolume(c, newVolumeParams{
		ConfigVolume: arvados.Volume{Replication: 2},
		MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
		BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),

	ctx, cancel := context.WithCancel(context.Background())

	handler.requested = make(chan *http.Request)
	handler.unblock = make(chan struct{})
	defer close(handler.unblock)

	doneFunc := make(chan struct{})
	go func() {
		err := testFunc(ctx, v)
		c.Check(err, check.Equals, context.Canceled)
		close(doneFunc)
	}()

	timeout := time.After(10 * time.Second)

	// Wait for the stub server to receive a request, meaning
	// Get() is waiting for an s3 operation.
	select {
	case <-timeout:
		c.Fatal("timed out waiting for test func to call our handler")
	case <-doneFunc:
		c.Fatal("test func finished without even calling our handler!")
	case <-handler.requested:
	}
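
// TestBackendStates exercises Trash/Untrash/EmptyTrash behavior for
// combinations of the three objects that can represent a block in the
// bucket: the data object (key), its recent/key marker, and its
// trash/key copy. Each scenario below gives the timestamps of those
// three objects (none meaning the object does not exist), followed by
// the expected outcomes: canGet, canTrash, canGetAfterTrash, canUntrash,
// haveTrashAfterEmpty, and freshAfterEmpty.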
func (s *stubbedS3Suite) TestBackendStates(c *check.C) {
	s.cluster.Collections.BlobTrashLifetime.Set("1h")
	s.cluster.Collections.BlobSigningTTL.Set("1h")

	v := s.newTestableVolume(c, newVolumeParams{
		ConfigVolume: arvados.Volume{Replication: 2},
		Logger:       ctxlog.TestLogger(c),
		MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
		BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),

	// putS3Obj writes data directly to the stub S3 backend under the
	// given key, back-dating its timestamp to t; a zero t means the
	// object should not exist at all.
	putS3Obj := func(t time.Time, key string, data []byte) {
		if t.IsZero() {
			return
		}
		s.s3fakeClock.now = &t
		uploader := manager.NewUploader(v.bucket.svc)
		_, err := uploader.Upload(context.Background(), &s3.PutObjectInput{
			Bucket: aws.String(v.bucket.bucket),
			Key:    aws.String(key),
			Body:   bytes.NewReader(data),
		})
		c.Assert(err, check.IsNil)
		s.s3fakeClock.now = nil
	}

	for _, scenario := range []struct {
		canGetAfterTrash    bool
		haveTrashAfterEmpty bool
			"No related objects",
			false, false, false, false, false, false,
			// Stored by older version, or there was a
			// race between EmptyTrash and Put: Trash is a
			// no-op even though the data object is very
			// old.
			t0.Add(-48 * time.Hour), none, none,
			true, true, true, false, false, false,
			"Not trash, but old enough to be eligible for trash",
			t0.Add(-24 * time.Hour), t0.Add(-2 * time.Hour), none,
			true, true, false, false, false, false,
			"Not trash, and not old enough to be eligible for trash",
			t0.Add(-24 * time.Hour), t0.Add(-30 * time.Minute), none,
			true, true, true, false, false, false,
			"Trashed + untrashed copies exist, due to recent race between Trash and Put",
			t0.Add(-24 * time.Hour), t0.Add(-3 * time.Minute), t0.Add(-2 * time.Minute),
			true, true, true, true, true, false,
			"Trashed + untrashed copies exist, trash nearly eligible for deletion: prone to Trash race",
			t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour), t0.Add(-59 * time.Minute),
			true, false, true, true, true, false,
			"Trashed + untrashed copies exist, trash is eligible for deletion: prone to Trash race",
			t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour), t0.Add(-61 * time.Minute),
			true, false, true, true, false, false,
			"Trashed + untrashed copies exist, due to old race between Put and unfinished Trash: emptying trash is unsafe",
			t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour), t0.Add(-12 * time.Hour),
			true, false, true, true, true, true,
			"Trashed + untrashed copies exist, used to be unsafe to empty, but since made safe by fixRace+Touch",
			t0.Add(-time.Second), t0.Add(-time.Second), t0.Add(-12 * time.Hour),
			true, true, true, true, false, false,
			"Trashed + untrashed copies exist because Trash operation was interrupted (no race)",
			t0.Add(-24 * time.Hour), t0.Add(-24 * time.Hour), t0.Add(-12 * time.Hour),
			true, false, true, true, false, false,
			"Trash, not yet eligible for deletion",
			none, t0.Add(-12 * time.Hour), t0.Add(-time.Minute),
			false, false, false, true, true, false,
			"Trash, not yet eligible for deletion, prone to races",
			none, t0.Add(-12 * time.Hour), t0.Add(-59 * time.Minute),
			false, false, false, true, true, false,
			"Trash, eligible for deletion",
			none, t0.Add(-12 * time.Hour), t0.Add(-2 * time.Hour),
			false, false, false, true, false, false,
			"Erroneously trashed during a race, detected before BlobTrashLifetime",
			none, t0.Add(-30 * time.Minute), t0.Add(-29 * time.Minute),
			true, false, true, true, true, false,
			"Erroneously trashed during a race, rescue during EmptyTrash despite reaching BlobTrashLifetime",
			none, t0.Add(-90 * time.Minute), t0.Add(-89 * time.Minute),
			true, false, true, true, true, false,
			"Trashed copy exists with no recent/* marker (cause unknown); repair by untrashing",
			none, none, t0.Add(-time.Minute),
			false, false, false, true, true, true,
		for _, prefixLength := range []int{0, 3} {
			v.PrefixLength = prefixLength
			c.Logf("Scenario: %q (prefixLength=%d)", scenario.label, prefixLength)

			// We have a few tests to run for each scenario, and
			// the tests are expected to change state. By calling
			// this setup func between tests, we (re)create the
			// scenario as specified, using a new unique block
			// locator to prevent interference from previous
			// tests.
			setupScenario := func() (string, []byte) {
				blk := []byte(fmt.Sprintf("%d", nextKey))
				loc := fmt.Sprintf("%x", md5.Sum(blk))
				key := loc
				if prefixLength > 0 {
					key = loc[:prefixLength] + "/" + loc
				}
				c.Log("\t", loc, "\t", key)
				putS3Obj(scenario.dataT, key, blk)
				putS3Obj(scenario.recentT, "recent/"+key, nil)
				putS3Obj(scenario.trashT, "trash/"+key, blk)
				v.s3fakeClock.now = &t0
				return loc, blk
			}

			// Check canGet
			loc, blk := setupScenario()
			err := v.BlockRead(context.Background(), loc, brdiscard)
			c.Check(err == nil, check.Equals, scenario.canGet, check.Commentf("err was %+v", err))
			if err != nil {
				c.Check(os.IsNotExist(err), check.Equals, true)
			}

			// Call Trash, then check canTrash and canGetAfterTrash
			loc, _ = setupScenario()
			err = v.BlockTrash(loc)
			c.Check(err == nil, check.Equals, scenario.canTrash)
			err = v.BlockRead(context.Background(), loc, brdiscard)
			c.Check(err == nil, check.Equals, scenario.canGetAfterTrash)
			if err != nil {
				c.Check(os.IsNotExist(err), check.Equals, true)
			}

			// Call Untrash, then check canUntrash
			loc, _ = setupScenario()
			err = v.BlockUntrash(loc)
			c.Check(err == nil, check.Equals, scenario.canUntrash)
			if scenario.dataT != none || scenario.trashT != none {
				// In all scenarios where the data exists, we
				// should be able to Get after Untrash --
				// regardless of timestamps, errors, race
				// conditions, etc.
				err = v.BlockRead(context.Background(), loc, brdiscard)
				c.Check(err, check.IsNil)
			}

			// Call EmptyTrash, then check haveTrashAfterEmpty and
			// freshAfterEmpty
			loc, _ = setupScenario()
			v.EmptyTrash()
			_, err = v.head("trash/" + v.key(loc))
			c.Check(err == nil, check.Equals, scenario.haveTrashAfterEmpty)
			if scenario.freshAfterEmpty {
				t, err := v.Mtime(loc)
				c.Check(err, check.IsNil)
				// new mtime must be current (with an
				// allowance for 1s timestamp precision)
				c.Check(t.After(t0.Add(-time.Second)), check.Equals, true)
			}

			// Check for current Mtime after Put (applies to all
			// scenarios)
			loc, blk = setupScenario()
			err = v.BlockWrite(context.Background(), loc, blk)
			c.Check(err, check.IsNil)
			t, err := v.Mtime(loc)
			c.Check(err, check.IsNil)
			c.Check(t.After(t0.Add(-time.Second)), check.Equals, true)
		}
	}
}
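
// testableS3Volume wraps an s3Volume with the stub server, fake clock,
// and helpers the tests need to create buckets and back-date objects.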
type testableS3Volume struct {
	*s3Volume
	server      *httptest.Server
	s3fakeClock *s3fakeClock
}

// gofakes3logger adapts a logrus logger to the gofakes3.Logger interface.
type gofakes3logger struct {
	logrus.FieldLogger
}

func (l gofakes3logger) Print(level gofakes3.LogLevel, v ...interface{}) {
	switch level {
	case gofakes3.LogErr:
		l.Errorln(v...)
	case gofakes3.LogWarn:
		l.Warnln(v...)
	case gofakes3.LogInfo:
		l.Infoln(v...)
	default:
		panic("unknown level")
	}
}

var testBucketSerial atomic.Int64
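
// newTestableVolume starts a shared gofakes3 server if one is not
// already running, creates a fresh uniquely named bucket in it, and
// returns a testableS3Volume configured to use that bucket (and, if the
// suite's stub metadata server is running, to fetch credentials from it
// instead of using static keys).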
func (s *stubbedS3Suite) newTestableVolume(c *check.C, params newVolumeParams, raceWindow time.Duration) *testableS3Volume {
	if params.Logger == nil {
		params.Logger = ctxlog.TestLogger(c)
	}
	if s.s3server == nil {
		backend := s3mem.New(s3mem.WithTimeSource(s.s3fakeClock))
		logger := ctxlog.TestLogger(c)
		faker := gofakes3.New(backend,
			gofakes3.WithTimeSource(s.s3fakeClock),
			gofakes3.WithLogger(gofakes3logger{FieldLogger: logger}),
			gofakes3.WithTimeSkewLimit(0))
		s.s3server = httptest.NewServer(faker.Server())
	}
	endpoint := s.s3server.URL
	bucketName := fmt.Sprintf("testbucket%d", testBucketSerial.Add(1))

	var metadataURL, accessKey, secretKey string
	if s.metadata != nil {
		metadataURL = s.metadata.URL
	} else {
		accessKey, secretKey = "xxx", "xxx"
	}

	v := &testableS3Volume{
			S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
				AccessKeyID:        accessKey,
				SecretAccessKey:    secretKey,
				Region:             "test-region-1",
				LocationConstraint: true,
			cluster:    params.Cluster,
			volume:     params.ConfigVolume,
			logger:     params.Logger,
			metrics:    params.MetricsVecs,
			bufferPool: params.BufferPool,
		s3fakeClock: s.s3fakeClock,
	c.Assert(v.s3Volume.check(metadataURL), check.IsNil)
	// Create the test bucket
	input := &s3.CreateBucketInput{
		Bucket: aws.String(bucketName),
	}
	_, err := v.s3Volume.bucket.svc.CreateBucket(context.Background(), input)
	c.Assert(err, check.IsNil)
	// We couldn't set RaceWindow until now because check()
	// rejects negative values.
	v.s3Volume.RaceWindow = arvados.Duration(raceWindow)
	return v
}
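
// blockWriteWithoutMD5Check stores a block (and its recent/ marker)
// directly via the S3 uploader, bypassing the MD5 verification that
// BlockWrite performs; TestIndex uses it to populate the bucket with
// locators that don't match the content.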
func (v *testableS3Volume) blockWriteWithoutMD5Check(loc string, block []byte) error {
	key := v.key(loc)
	r := newCountingReader(bytes.NewReader(block), v.bucket.stats.TickOutBytes)

	uploader := manager.NewUploader(v.bucket.svc, func(u *manager.Uploader) {
		u.PartSize = 5 * 1024 * 1024
	})

	_, err := uploader.Upload(context.Background(), &s3.PutObjectInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(key),
		Body:   r,
	})
	if err != nil {
		return err
	}

	empty := bytes.NewReader([]byte{})
	_, err = uploader.Upload(context.Background(), &s3.PutObjectInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String("recent/" + key),
		Body:   empty,
	})
	return err
}

// TouchWithDate turns back the clock while doing a Touch(). We assume
// there are no other operations happening on the same stub S3 server
// while we do this.
func (v *testableS3Volume) TouchWithDate(loc string, lastPut time.Time) {
	v.s3fakeClock.now = &lastPut

	uploader := manager.NewUploader(v.bucket.svc)
	empty := bytes.NewReader([]byte{})
	_, err := uploader.Upload(context.Background(), &s3.PutObjectInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String("recent/" + v.key(loc)),
		Body:   empty,
	})
	if err != nil {
		panic(err)
	}

	v.s3fakeClock.now = nil
}

func (v *testableS3Volume) Teardown() {

func (v *testableS3Volume) ReadWriteOperationLabelValues() (r, w string) {