1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
18 "git.arvados.org/arvados.git/sdk/go/arvados"
19 "git.arvados.org/arvados.git/sdk/go/arvadosclient"
20 "git.arvados.org/arvados.git/sdk/go/arvadostest"
21 "git.arvados.org/arvados.git/sdk/go/keepclient"
22 "github.com/AdRoll/goamz/aws"
23 "github.com/AdRoll/goamz/s3"
24 check "gopkg.in/check.v1"
// NOTE(review): these are fields of the s3stage test-fixture struct; the
// enclosing `type s3stage struct {` line (and some sibling fields such as
// arv/proj and the bucket handles used below) fall in a gap of this excerpt.
29 ac *arvadosclient.ArvadosClient
30 kc *keepclient.KeepClient
// coll is the test collection created by s3setup; teardown deletes it.
33 coll arvados.Collection
// s3setup builds the fixture shared by the S3 API tests: it creates a
// scratch project, a collection inside it containing an empty file, an
// empty directory, and a small text file ("sailboat.txt"), and returns an
// s3stage with goamz clients/buckets pointed at the keep-web test server.
// NOTE(review): the left-margin numbering is discontinuous, so some
// statements (struct-literal closers, a fs.Sync/collection save around
// original lines 66-69, the region literal, and the returned s3stage
// literal) are not visible here; comments describe only visible lines.
37 func (s *IntegrationSuite) s3setup(c *check.C) s3stage {
38 var proj arvados.Group
39 var coll arvados.Collection
40 arv := arvados.NewClientFromEnv()
41 arv.AuthToken = arvadostest.ActiveToken
// Create a throwaway project to own the test collection; ensure_unique_name
// lets parallel/leftover runs coexist without name collisions.
42 err := arv.RequestAndDecode(&proj, "POST", "arvados/v1/groups", nil, map[string]interface{}{
43 "group": map[string]interface{}{
44 "group_class": "project",
45 "name": "keep-web s3 test",
47 "ensure_unique_name": true,
49 c.Assert(err, check.IsNil)
// Create the collection with an empty file and an empty directory
// (the "0:0:." entry is how a manifest encodes an empty directory).
50 err = arv.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{"collection": map[string]interface{}{
51 "owner_uuid": proj.UUID,
52 "name": "keep-web s3 test collection",
53 "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:emptyfile\n./emptydir d41d8cd98f00b204e9800998ecf8427e+0 0:0:.\n",
55 c.Assert(err, check.IsNil)
56 ac, err := arvadosclient.New(arv)
57 c.Assert(err, check.IsNil)
58 kc, err := keepclient.MakeKeepClient(ac)
59 c.Assert(err, check.IsNil)
// Use the collection filesystem to add a small non-empty file
// (4 bytes: U+26F5 in UTF-8 plus a newline).
60 fs, err := coll.FileSystem(arv, kc)
61 c.Assert(err, check.IsNil)
62 f, err := fs.OpenFile("sailboat.txt", os.O_CREATE|os.O_WRONLY, 0644)
63 c.Assert(err, check.IsNil)
64 _, err = f.Write([]byte("⛵\n"))
65 c.Assert(err, check.IsNil)
67 c.Assert(err, check.IsNil)
69 c.Assert(err, check.IsNil)
// Re-fetch so coll carries the committed manifest/PDH after the writes above.
70 err = arv.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+coll.UUID, nil, nil)
71 c.Assert(err, check.IsNil)
// goamz credentials: the Arvados v2 token is used as both access key and
// secret; keep-web accepts it either way.
73 auth := aws.NewAuth(arvadostest.ActiveTokenV2, arvadostest.ActiveTokenV2, "", time.Now().Add(time.Hour))
75 Name: s.testServer.Addr,
76 S3Endpoint: "http://" + s.testServer.Addr,
78 client := s3.New(*auth, region)
// Two bucket views: one addressing the project, one the collection.
84 projbucket: &s3.Bucket{
89 collbucket: &s3.Bucket{
// teardown deletes the collection and project created by s3setup. Empty
// UUIDs (a stage that never finished setup) are skipped. Errors are
// reported with Check rather than Assert so both deletions are attempted.
96 func (stage s3stage) teardown(c *check.C) {
97 if stage.coll.UUID != "" {
98 err := stage.arv.RequestAndDecode(&stage.coll, "DELETE", "arvados/v1/collections/"+stage.coll.UUID, nil, nil)
99 c.Check(err, check.IsNil)
101 if stage.proj.UUID != "" {
102 err := stage.arv.RequestAndDecode(&stage.proj, "DELETE", "arvados/v1/groups/"+stage.proj.UUID, nil, nil)
103 c.Check(err, check.IsNil)
// TestS3HeadBucket checks that HEAD on both the collection bucket and the
// project bucket reports the bucket as existing.
107 func (s *IntegrationSuite) TestS3HeadBucket(c *check.C) {
108 stage := s.s3setup(c)
109 defer stage.teardown(c)
111 for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
112 c.Logf("bucket %s", bucket.Name)
// Exists("") issues a HEAD request for the bucket root.
113 exists, err := bucket.Exists("")
114 c.Check(err, check.IsNil)
115 c.Check(exists, check.Equals, true)
// TestS3CollectionGetObject runs the GetObject checks against the
// collection-addressed bucket (object keys are collection-relative paths).
119 func (s *IntegrationSuite) TestS3CollectionGetObject(c *check.C) {
120 stage := s.s3setup(c)
121 defer stage.teardown(c)
122 s.testS3GetObject(c, stage.collbucket, "")
// TestS3ProjectGetObject runs the same GetObject checks against the
// project-addressed bucket, where keys are prefixed by the collection name.
124 func (s *IntegrationSuite) TestS3ProjectGetObject(c *check.C) {
125 stage := s.s3setup(c)
126 defer stage.teardown(c)
127 s.testS3GetObject(c, stage.projbucket, stage.coll.Name+"/")
// testS3GetObject exercises GET/HEAD on the fixture objects: an empty file
// reads back zero bytes, a missing key returns 404 (and Exists==false),
// and sailboat.txt reads back its exact 4-byte content.
129 func (s *IntegrationSuite) testS3GetObject(c *check.C, bucket *s3.Bucket, prefix string) {
130 rdr, err := bucket.GetReader(prefix + "emptyfile")
131 c.Assert(err, check.IsNil)
132 buf, err := ioutil.ReadAll(rdr)
133 c.Check(err, check.IsNil)
134 c.Check(len(buf), check.Equals, 0)
136 c.Check(err, check.IsNil)
// GET of a nonexistent key must map to HTTP 404.
139 rdr, err = bucket.GetReader(prefix + "missingfile")
140 c.Check(err, check.ErrorMatches, `404 Not Found`)
// HEAD of a nonexistent key: no transport error, exists=false.
143 exists, err := bucket.Exists(prefix + "missingfile")
144 c.Check(err, check.IsNil)
145 c.Check(exists, check.Equals, false)
// Non-empty file round-trips byte-for-byte.
148 rdr, err = bucket.GetReader(prefix + "sailboat.txt")
149 c.Assert(err, check.IsNil)
150 buf, err = ioutil.ReadAll(rdr)
151 c.Check(err, check.IsNil)
152 c.Check(buf, check.DeepEquals, []byte("⛵\n"))
154 c.Check(err, check.IsNil)
157 exists, err = bucket.Exists(prefix + "sailboat.txt")
158 c.Check(err, check.IsNil)
159 c.Check(exists, check.Equals, true)
// TestS3CollectionPutObjectSuccess runs the PUT success cases against the
// collection-addressed bucket.
162 func (s *IntegrationSuite) TestS3CollectionPutObjectSuccess(c *check.C) {
163 stage := s.s3setup(c)
164 defer stage.teardown(c)
165 s.testS3PutObjectSuccess(c, stage.collbucket, "")
// TestS3ProjectPutObjectSuccess runs the PUT success cases against the
// project-addressed bucket (keys prefixed by the collection name).
167 func (s *IntegrationSuite) TestS3ProjectPutObjectSuccess(c *check.C) {
168 stage := s.s3setup(c)
169 defer stage.teardown(c)
170 s.testS3PutObjectSuccess(c, stage.projbucket, stage.coll.Name+"/")
// testS3PutObjectSuccess PUTs objects at several paths (plain file, files
// in new nested dirs, and a trailing-slash "folder object") and verifies
// each one reads back with the expected length and content.
// NOTE(review): several trial-table fields (e.g. path/size of the first
// trial) fall in gaps of this excerpt.
172 func (s *IntegrationSuite) testS3PutObjectSuccess(c *check.C, bucket *s3.Bucket, prefix string) {
173 for _, trial := range []struct {
181 contentType: "application/octet-stream",
183 path: "newdir/newfile",
185 contentType: "application/octet-stream",
187 path: "newdir1/newdir2/newfile",
189 contentType: "application/octet-stream",
// Trailing "/" + x-directory content type is the S3 "folder object" idiom.
191 path: "newdir1/newdir2/newdir3/",
193 contentType: "application/x-directory",
196 c.Logf("=== %v", trial)
198 objname := prefix + trial.path
// Precondition: the object must not exist before the PUT.
200 _, err := bucket.GetReader(objname)
201 c.Assert(err, check.ErrorMatches, `404 Not Found`)
203 buf := make([]byte, trial.size)
206 err = bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), trial.contentType, s3.Private, s3.Options{})
207 c.Check(err, check.IsNil)
209 rdr, err := bucket.GetReader(objname)
// Folder objects are only retrievable when S3FolderObjects is enabled.
210 if strings.HasSuffix(trial.path, "/") && !s.testServer.Config.cluster.Collections.S3FolderObjects {
211 c.Check(err, check.NotNil)
213 } else if !c.Check(err, check.IsNil) {
// Read-back must match what was written, byte for byte.
216 buf2, err := ioutil.ReadAll(rdr)
217 c.Check(err, check.IsNil)
218 c.Check(buf2, check.HasLen, len(buf))
219 c.Check(bytes.Equal(buf, buf2), check.Equals, true)
// TestS3ProjectPutObjectNotSupported verifies that PUT directly at the
// project (bucket-root) level — i.e. a key that is not inside a collection
// — is rejected with 400, and that the failed PUT leaves nothing behind.
223 func (s *IntegrationSuite) TestS3ProjectPutObjectNotSupported(c *check.C) {
224 stage := s.s3setup(c)
225 defer stage.teardown(c)
226 bucket := stage.projbucket
228 for _, trial := range []struct {
236 contentType: "application/octet-stream",
238 path: "newdir/newfile",
240 contentType: "application/octet-stream",
244 contentType: "application/x-directory",
247 c.Logf("=== %v", trial)
// Precondition: target must not exist.
249 _, err := bucket.GetReader(trial.path)
250 c.Assert(err, check.ErrorMatches, `404 Not Found`)
252 buf := make([]byte, trial.size)
// PUT must fail with 400 Bad Request at project level...
255 err = bucket.PutReader(trial.path, bytes.NewReader(buf), int64(len(buf)), trial.contentType, s3.Private, s3.Options{})
256 c.Check(err, check.ErrorMatches, `400 Bad Request`)
// ...and must not have created the object as a side effect.
258 _, err = bucket.GetReader(trial.path)
259 c.Assert(err, check.ErrorMatches, `404 Not Found`)
// TestS3CollectionDeleteObject runs the DELETE cases against the
// collection-addressed bucket.
263 func (s *IntegrationSuite) TestS3CollectionDeleteObject(c *check.C) {
264 stage := s.s3setup(c)
265 defer stage.teardown(c)
266 s.testS3DeleteObject(c, stage.collbucket, "")
// TestS3ProjectDeleteObject runs the DELETE cases against the
// project-addressed bucket (keys prefixed by the collection name).
268 func (s *IntegrationSuite) TestS3ProjectDeleteObject(c *check.C) {
269 stage := s.s3setup(c)
270 defer stage.teardown(c)
271 s.testS3DeleteObject(c, stage.projbucket, stage.coll.Name+"/")
// testS3DeleteObject deletes a table of fixture paths (table entries fall
// in a gap of this excerpt) and verifies each is gone afterwards. Folder
// objects are enabled so directory-marker keys are deletable too.
273 func (s *IntegrationSuite) testS3DeleteObject(c *check.C, bucket *s3.Bucket, prefix string) {
274 s.testServer.Config.cluster.Collections.S3FolderObjects = true
275 for _, trial := range []struct {
286 objname := prefix + trial.path
287 comment := check.Commentf("objname %q", objname)
289 err := bucket.Del(objname)
// Deleting the bucket root itself is expected to fail.
290 if trial.path == "/" {
291 c.Check(err, check.NotNil)
294 c.Check(err, check.IsNil, comment)
// After a successful delete, GET must no longer find the object.
295 _, err = bucket.GetReader(objname)
296 c.Check(err, check.NotNil, comment)
// TestS3CollectionPutObjectFailure runs the PUT failure cases against the
// collection-addressed bucket.
300 func (s *IntegrationSuite) TestS3CollectionPutObjectFailure(c *check.C) {
301 stage := s.s3setup(c)
302 defer stage.teardown(c)
303 s.testS3PutObjectFailure(c, stage.collbucket, "")
// TestS3ProjectPutObjectFailure runs the PUT failure cases against the
// project-addressed bucket (keys prefixed by the collection name).
305 func (s *IntegrationSuite) TestS3ProjectPutObjectFailure(c *check.C) {
306 stage := s.s3setup(c)
307 defer stage.teardown(c)
308 s.testS3PutObjectFailure(c, stage.projbucket, stage.coll.Name+"/")
// testS3PutObjectFailure PUTs to a table of invalid target paths (keys that
// collide with existing files/dirs; more entries fall in gaps of this
// excerpt) and expects each to fail with 400 without creating an object.
// NOTE(review): a sync.WaitGroup is declared, which suggests the trials run
// concurrently; the wg.Add/go/Wait lines are not visible here — confirm
// against the full file.
310 func (s *IntegrationSuite) testS3PutObjectFailure(c *check.C, bucket *s3.Bucket, prefix string) {
// Folder objects disabled: trailing-slash PUTs are not accepted either.
311 s.testServer.Config.cluster.Collections.S3FolderObjects = false
312 var wg sync.WaitGroup
313 for _, trial := range []struct {
317 path: "emptyfile/newname", // emptyfile exists, see s3setup()
319 path: "emptyfile/", // emptyfile exists, see s3setup()
321 path: "emptydir", // dir already exists, see s3setup()
344 c.Logf("=== %v", trial)
346 objname := prefix + trial.path
348 buf := make([]byte, 1234)
351 err := bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), "application/octet-stream", s3.Private, s3.Options{})
352 if !c.Check(err, check.ErrorMatches, `400 Bad.*`, check.Commentf("PUT %q should fail", objname)) {
// The failed PUT must not have created anything (skip the root, which is
// not a GET-able object name).
356 if objname != "" && objname != "/" {
357 _, err = bucket.GetReader(objname)
358 c.Check(err, check.ErrorMatches, `404 Not Found`, check.Commentf("GET %q should return 404", objname))
// writeBigDirs populates the stage collection with `dirs` directories
// named dir0, dir1, ... each containing `filesPerDir` empty files named
// file0.txt, file1.txt, ...; it syncs the filesystem so the listing tests
// see the committed state.
365 func (stage *s3stage) writeBigDirs(c *check.C, dirs int, filesPerDir int) {
366 fs, err := stage.coll.FileSystem(stage.arv, stage.kc)
367 c.Assert(err, check.IsNil)
368 for d := 0; d < dirs; d++ {
369 dir := fmt.Sprintf("dir%d", d)
370 c.Assert(fs.Mkdir(dir, 0755), check.IsNil)
371 for i := 0; i < filesPerDir; i++ {
// Files are created empty — only their names matter for list tests.
372 f, err := fs.OpenFile(fmt.Sprintf("%s/file%d.txt", dir, i), os.O_CREATE|os.O_WRONLY, 0644)
373 c.Assert(err, check.IsNil)
374 c.Assert(f.Close(), check.IsNil)
// Commit all of the above to the collection.
377 c.Assert(fs.Sync(), check.IsNil)
// TestS3GetBucketVersioning checks the GET ?versioning bucket subresource:
// both bucket flavors must answer with an empty (i.e. versioning-disabled)
// VersioningConfiguration XML document.
380 func (s *IntegrationSuite) TestS3GetBucketVersioning(c *check.C) {
381 stage := s.s3setup(c)
382 defer stage.teardown(c)
383 for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
// Hand-rolled request: goamz has no GetBucketVersioning helper, and the
// AWS signature is replaced by keep-web's "AWS <token>:none" shortcut.
384 req, err := http.NewRequest("GET", bucket.URL("/"), nil)
385 c.Check(err, check.IsNil)
386 req.Header.Set("Authorization", "AWS "+arvadostest.ActiveTokenV2+":none")
387 req.URL.RawQuery = "versioning"
388 resp, err := http.DefaultClient.Do(req)
389 c.Assert(err, check.IsNil)
390 c.Check(resp.Header.Get("Content-Type"), check.Equals, "application/xml")
391 buf, err := ioutil.ReadAll(resp.Body)
392 c.Assert(err, check.IsNil)
// An empty self-closing element means versioning was never enabled.
393 c.Check(string(buf), check.Equals, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"/>\n")
// TestS3CollectionList checks paginated bucket listing with folder objects
// both disabled and enabled. The range clause assigns the bool directly
// into the server config; `markers` (declared in a gap of this excerpt,
// presumably the loop index 0/1) doubles as the per-directory count of
// fake "directory" marker entries — TODO confirm against the full file.
397 func (s *IntegrationSuite) TestS3CollectionList(c *check.C) {
398 stage := s.s3setup(c)
399 defer stage.teardown(c)
402 for markers, s.testServer.Config.cluster.Collections.S3FolderObjects = range []bool{false, true} {
405 stage.writeBigDirs(c, dirs, filesPerDir)
406 // Total # objects is:
407 // 2 file entries from s3setup (emptyfile and sailboat.txt)
408 // +1 fake "directory" marker from s3setup (emptydir) (if enabled)
409 // +dirs fake "directory" marker from writeBigDirs (dir0/, dir1/) (if enabled)
410 // +filesPerDir*dirs file entries from writeBigDirs (dir0/file0.txt, etc.)
// Same expected totals with a page size above and below the object count.
411 s.testS3List(c, stage.collbucket, "", 4000, markers+2+(filesPerDir+markers)*dirs)
412 s.testS3List(c, stage.collbucket, "", 131, markers+2+(filesPerDir+markers)*dirs)
413 s.testS3List(c, stage.collbucket, "dir0/", 71, filesPerDir+markers)
// testS3List pages through a bucket listing with the given prefix and page
// size, checking that each page respects the (server-capped 1000) page
// size, that pagination terminates in a sane number of pages, that
// NextMarker/IsTruncated are consistent, and that exactly expectFiles
// distinct keys are returned overall.
416 func (s *IntegrationSuite) testS3List(c *check.C, bucket *s3.Bucket, prefix string, pageSize, expectFiles int) {
417 c.Logf("testS3List: prefix=%q pageSize=%d S3FolderObjects=%v", prefix, pageSize, s.testServer.Config.cluster.Collections.S3FolderObjects)
// S3 caps max-keys at 1000 regardless of the requested page size.
418 expectPageSize := pageSize
419 if expectPageSize > 1000 {
420 expectPageSize = 1000
422 gotKeys := map[string]s3.Key{}
426 resp, err := bucket.List(prefix, "", nextMarker, pageSize)
427 if !c.Check(err, check.IsNil) {
430 c.Check(len(resp.Contents) <= expectPageSize, check.Equals, true)
// Guard against infinite pagination: never more pages than needed.
431 if pages++; !c.Check(pages <= (expectFiles/expectPageSize)+1, check.Equals, true) {
434 for _, key := range resp.Contents {
435 gotKeys[key.Key] = key
// sailboat.txt is 4 bytes (see s3setup); spot-check the reported size.
436 if strings.Contains(key.Key, "sailboat.txt") {
437 c.Check(key.Size, check.Equals, int64(4))
// Last page: no NextMarker. Otherwise a non-empty marker drives the
// next iteration.
440 if !resp.IsTruncated {
441 c.Check(resp.NextMarker, check.Equals, "")
444 if !c.Check(resp.NextMarker, check.Not(check.Equals), "") {
447 nextMarker = resp.NextMarker
449 c.Check(len(gotKeys), check.Equals, expectFiles)
// TestS3CollectionListRollup runs the delimiter/common-prefix listing
// checks with folder objects both disabled and enabled (the range clause
// assigns the bool directly into the server config).
452 func (s *IntegrationSuite) TestS3CollectionListRollup(c *check.C) {
453 for _, s.testServer.Config.cluster.Collections.S3FolderObjects = range []bool{false, true} {
454 s.testS3CollectionListRollup(c)
// testS3CollectionListRollup first collects the full flat key list
// (allfiles) by paging without a delimiter, sanity-checks directory-marker
// entries against the S3FolderObjects setting, then replays a table of
// (prefix, delimiter, marker) trials, computing the expected keys /
// CommonPrefixes / NextMarker / IsTruncated from allfiles and comparing
// with the server's response.
// NOTE(review): this function continues past the end of the excerpt, and
// some lines (e.g. dirs/filesPerDir/markers declarations, maxKeys, loop
// closers) fall in gaps; comments describe only the visible statements.
458 func (s *IntegrationSuite) testS3CollectionListRollup(c *check.C) {
459 stage := s.s3setup(c)
460 defer stage.teardown(c)
464 stage.writeBigDirs(c, dirs, filesPerDir)
// Add a root-level file that sorts before "dir0/" to exercise ordering.
465 err := stage.collbucket.PutReader("dingbats", &bytes.Buffer{}, 0, "application/octet-stream", s3.Private, s3.Options{})
466 c.Assert(err, check.IsNil)
467 var allfiles []string
// Page through the whole bucket, deduplicating consecutive repeats
// (the last key of one page can reappear as the first of the next).
468 for marker := ""; ; {
469 resp, err := stage.collbucket.List("", "", marker, 20000)
470 c.Check(err, check.IsNil)
471 for _, key := range resp.Contents {
472 if len(allfiles) == 0 || allfiles[len(allfiles)-1] != key.Key {
473 allfiles = append(allfiles, key.Key)
476 marker = resp.NextMarker
482 if s.testServer.Config.cluster.Collections.S3FolderObjects {
485 c.Check(allfiles, check.HasLen, dirs*(filesPerDir+markers)+3+markers)
// Verify directory markers: forbidden when the feature is off; when on,
// every file inside a directory must have been preceded by its marker.
487 gotDirMarker := map[string]bool{}
488 for _, name := range allfiles {
489 isDirMarker := strings.HasSuffix(name, "/")
491 c.Check(isDirMarker, check.Equals, false, check.Commentf("name %q", name))
492 } else if isDirMarker {
493 gotDirMarker[name] = true
494 } else if i := strings.LastIndex(name, "/"); i >= 0 {
495 c.Check(gotDirMarker[name[:i+1]], check.Equals, true, check.Commentf("name %q", name))
496 gotDirMarker[name[:i+1]] = true // skip redundant complaints about this dir marker
// Trials: each row is (prefix, delimiter, marker) — see comments at right.
500 for _, trial := range []struct {
515 {"dir0", "/", "dir0/file14.txt"}, // no commonprefixes
516 {"", "", "dir0/file14.txt"}, // middle page, skip walking dir1
517 {"", "", "dir1/file14.txt"}, // middle page, skip walking dir0
518 {"", "", "dir1/file498.txt"}, // last page of results
519 {"dir1/file", "", "dir1/file498.txt"}, // last page of results, with prefix
520 {"dir1/file", "/", "dir1/file498.txt"}, // last page of results, with prefix + delimiter
521 {"dir1", "Z", "dir1/file498.txt"}, // delimiter "Z" never appears
522 {"dir2", "/", ""}, // prefix "dir2" does not exist
525 c.Logf("\n\n=== trial %+v markers=%d", trial, markers)
528 resp, err := stage.collbucket.List(trial.prefix, trial.delimiter, trial.marker, maxKeys)
529 c.Check(err, check.IsNil)
530 if resp.IsTruncated && trial.delimiter == "" {
531 // goamz List method fills in the missing
532 // NextMarker field if resp.IsTruncated, so
533 // now we can't really tell whether it was
534 // sent by the server or by goamz. In cases
535 // where it should be empty but isn't, assume
536 // it's goamz's fault.
// Recompute the expected response from the flat key list.
540 var expectKeys []string
541 var expectPrefixes []string
542 var expectNextMarker string
543 var expectTruncated bool
544 for _, key := range allfiles {
545 full := len(expectKeys)+len(expectPrefixes) >= maxKeys
546 if !strings.HasPrefix(key, trial.prefix) || key < trial.marker {
// Key rolls up into a common prefix: take everything through the first
// delimiter after the requested prefix.
548 } else if idx := strings.Index(key[len(trial.prefix):], trial.delimiter); trial.delimiter != "" && idx >= 0 {
549 prefix := key[:len(trial.prefix)+idx+1]
550 if len(expectPrefixes) > 0 && expectPrefixes[len(expectPrefixes)-1] == prefix {
551 // same prefix as previous key
553 expectNextMarker = key
554 expectTruncated = true
556 expectPrefixes = append(expectPrefixes, prefix)
559 if trial.delimiter != "" {
560 expectNextMarker = key
562 expectTruncated = true
565 expectKeys = append(expectKeys, key)
// Compare the server's answer with the expectation computed above.
570 for _, key := range resp.Contents {
571 gotKeys = append(gotKeys, key.Key)
573 var gotPrefixes []string
574 for _, prefix := range resp.CommonPrefixes {
575 gotPrefixes = append(gotPrefixes, prefix)
577 commentf := check.Commentf("trial %+v markers=%d", trial, markers)
578 c.Check(gotKeys, check.DeepEquals, expectKeys, commentf)
579 c.Check(gotPrefixes, check.DeepEquals, expectPrefixes, commentf)
580 c.Check(resp.NextMarker, check.Equals, expectNextMarker, commentf)
581 c.Check(resp.IsTruncated, check.Equals, expectTruncated, commentf)
582 c.Logf("=== trial %+v keys %q prefixes %q nextMarker %q", trial, gotKeys, gotPrefixes, resp.NextMarker)