1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
18 "git.arvados.org/arvados.git/sdk/go/arvados"
19 "git.arvados.org/arvados.git/sdk/go/arvadosclient"
20 "git.arvados.org/arvados.git/sdk/go/arvadostest"
21 "git.arvados.org/arvados.git/sdk/go/keepclient"
22 "github.com/AdRoll/goamz/aws"
23 "github.com/AdRoll/goamz/s3"
24 check "gopkg.in/check.v1"
29 ac *arvadosclient.ArvadosClient
30 kc *keepclient.KeepClient
33 coll arvados.Collection
// s3setup builds the fixture data shared by the S3 gateway tests: a
// "keep-web s3 test" project and, inside it, a collection whose manifest
// contains emptyfile and an emptydir directory placeholder; it then adds
// sailboat.txt via the collection filesystem and returns an s3stage with
// goamz bucket handles pointed at s.testServer.
// NOTE(review): this excerpt is elided — several interior lines (struct
// literal tails, closing braces, fs.Sync/collection save calls around the
// bare c.Assert lines below) are not visible here.
37 func (s *IntegrationSuite) s3setup(c *check.C) s3stage {
38 var proj arvados.Group
39 var coll arvados.Collection
40 arv := arvados.NewClientFromEnv()
41 arv.AuthToken = arvadostest.ActiveToken
42 err := arv.RequestAndDecode(&proj, "POST", "arvados/v1/groups", nil, map[string]interface{}{
43 "group": map[string]interface{}{
44 "group_class": "project",
45 "name": "keep-web s3 test",
// ensure_unique_name avoids collisions when tests run repeatedly against
// the same API server.
47 "ensure_unique_name": true,
49 c.Assert(err, check.IsNil)
50 err = arv.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{"collection": map[string]interface{}{
51 "owner_uuid": proj.UUID,
// Manifest provides a zero-length "emptyfile" plus an "emptydir"
// directory entry (the d41d8... hash is the md5 of empty content).
52 "name": "keep-web s3 test collection",
53 "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:emptyfile\n./emptydir d41d8cd98f00b204e9800998ecf8427e+0 0:0:.\n",
55 c.Assert(err, check.IsNil)
56 ac, err := arvadosclient.New(arv)
57 c.Assert(err, check.IsNil)
58 kc, err := keepclient.MakeKeepClient(ac)
59 c.Assert(err, check.IsNil)
// Write sailboat.txt (a 4-byte UTF-8 payload) through the collection
// filesystem so the tests have a small non-empty file to fetch.
60 fs, err := coll.FileSystem(arv, kc)
61 c.Assert(err, check.IsNil)
62 f, err := fs.OpenFile("sailboat.txt", os.O_CREATE|os.O_WRONLY, 0644)
63 c.Assert(err, check.IsNil)
64 _, err = f.Write([]byte("⛵\n"))
65 c.Assert(err, check.IsNil)
// NOTE(review): the operations whose errors are asserted on the next two
// lines (presumably f.Close and fs.Sync) are elided from this view.
67 c.Assert(err, check.IsNil)
69 c.Assert(err, check.IsNil)
// Re-fetch the collection record so coll reflects the saved content.
70 err = arv.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+coll.UUID, nil, nil)
71 c.Assert(err, check.IsNil)
// The Arvados v2 token is used as both the AWS access key and secret;
// keep-web accepts either field as the token.
73 auth := aws.NewAuth(arvadostest.ActiveTokenV2, arvadostest.ActiveTokenV2, "", time.Now().Add(time.Hour))
75 Name: s.testServer.Addr,
76 S3Endpoint: "http://" + s.testServer.Addr,
78 client := s3.New(*auth, region)
// Returned stage exposes one bucket per access style: projbucket
// (project UUID as bucket) and collbucket (collection UUID as bucket).
// NOTE(review): bucket Name fields and the rest of the return literal
// are elided from this view.
84 projbucket: &s3.Bucket{
89 collbucket: &s3.Bucket{
// teardown deletes the collection and project created by s3setup.
// Empty UUIDs are skipped so a partially-built stage can still be torn
// down. Uses c.Check (not Assert) so both deletions are attempted even
// if one fails.
96 func (stage s3stage) teardown(c *check.C) {
97 if stage.coll.UUID != "" {
98 err := stage.arv.RequestAndDecode(&stage.coll, "DELETE", "arvados/v1/collections/"+stage.coll.UUID, nil, nil)
99 c.Check(err, check.IsNil)
101 if stage.proj.UUID != "" {
102 err := stage.arv.RequestAndDecode(&stage.proj, "DELETE", "arvados/v1/groups/"+stage.proj.UUID, nil, nil)
103 c.Check(err, check.IsNil)
// TestS3HeadBucket checks that a HEAD-style existence probe (goamz
// Bucket.Exists with an empty key) succeeds for both the collection
// bucket and the project bucket.
107 func (s *IntegrationSuite) TestS3HeadBucket(c *check.C) {
108 stage := s.s3setup(c)
109 defer stage.teardown(c)
111 for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
112 c.Logf("bucket %s", bucket.Name)
113 exists, err := bucket.Exists("")
114 c.Check(err, check.IsNil)
115 c.Check(exists, check.Equals, true)
// TestS3CollectionGetObject runs the GetObject checks against the
// collection-as-bucket view (object keys are collection-relative paths).
119 func (s *IntegrationSuite) TestS3CollectionGetObject(c *check.C) {
120 stage := s.s3setup(c)
121 defer stage.teardown(c)
122 s.testS3GetObject(c, stage.collbucket, "")
// TestS3ProjectGetObject runs the GetObject checks against the
// project-as-bucket view, where object keys are prefixed with the
// collection name.
124 func (s *IntegrationSuite) TestS3ProjectGetObject(c *check.C) {
125 stage := s.s3setup(c)
126 defer stage.teardown(c)
127 s.testS3GetObject(c, stage.projbucket, stage.coll.Name+"/")
// testS3GetObject exercises GET/HEAD semantics for objects created by
// s3setup: an existing empty file, a missing key (404 and Exists=false),
// and sailboat.txt whose content is checked byte-for-byte.
// NOTE(review): this excerpt is elided — the rdr.Close() calls whose
// errors are asserted on the bare c.Check lines are not visible here.
129 func (s *IntegrationSuite) testS3GetObject(c *check.C, bucket *s3.Bucket, prefix string) {
130 rdr, err := bucket.GetReader(prefix + "emptyfile")
131 c.Assert(err, check.IsNil)
132 buf, err := ioutil.ReadAll(rdr)
133 c.Check(err, check.IsNil)
134 c.Check(len(buf), check.Equals, 0)
136 c.Check(err, check.IsNil)
// GetObject on a nonexistent key must report 404.
139 rdr, err = bucket.GetReader(prefix + "missingfile")
140 c.Check(err, check.ErrorMatches, `404 Not Found`)
// HEAD on a nonexistent key: no transport error, exists == false.
143 exists, err := bucket.Exists(prefix + "missingfile")
144 c.Check(err, check.IsNil)
145 c.Check(exists, check.Equals, false)
// GetObject on an existing file returns its exact content.
148 rdr, err = bucket.GetReader(prefix + "sailboat.txt")
149 c.Assert(err, check.IsNil)
150 buf, err = ioutil.ReadAll(rdr)
151 c.Check(err, check.IsNil)
152 c.Check(buf, check.DeepEquals, []byte("⛵\n"))
154 c.Check(err, check.IsNil)
// HEAD on the existing file reports exists == true.
157 exists, err = bucket.Exists(prefix + "sailboat.txt")
158 c.Check(err, check.IsNil)
159 c.Check(exists, check.Equals, true)
// TestS3CollectionPutObjectSuccess runs the PutObject success cases
// against the collection-as-bucket view.
162 func (s *IntegrationSuite) TestS3CollectionPutObjectSuccess(c *check.C) {
163 stage := s.s3setup(c)
164 defer stage.teardown(c)
165 s.testS3PutObjectSuccess(c, stage.collbucket, "")
// TestS3ProjectPutObjectSuccess runs the PutObject success cases against
// the project-as-bucket view (keys prefixed with the collection name).
167 func (s *IntegrationSuite) TestS3ProjectPutObjectSuccess(c *check.C) {
168 stage := s.s3setup(c)
169 defer stage.teardown(c)
170 s.testS3PutObjectSuccess(c, stage.projbucket, stage.coll.Name+"/")
// testS3PutObjectSuccess PUTs new objects (plain files, files in new
// nested directories, and a trailing-slash "folder object") and reads
// each one back to verify size and content.
// NOTE(review): this excerpt is elided — trial struct fields (path,
// size), the random-fill of buf, and several closing braces are not
// visible here.
172 func (s *IntegrationSuite) testS3PutObjectSuccess(c *check.C, bucket *s3.Bucket, prefix string) {
173 for _, trial := range []struct {
181 contentType: "application/octet-stream",
183 path: "newdir/newfile",
185 contentType: "application/octet-stream",
187 path: "newdir1/newdir2/newfile",
189 contentType: "application/octet-stream",
// Trailing "/" plus x-directory content type requests a folder object.
191 path: "newdir1/newdir2/newdir3/",
193 contentType: "application/x-directory",
196 c.Logf("=== %v", trial)
198 objname := prefix + trial.path
// Precondition: the key must not exist before the PUT.
200 _, err := bucket.GetReader(objname)
201 c.Assert(err, check.ErrorMatches, `404 Not Found`)
203 buf := make([]byte, trial.size)
206 err = bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), trial.contentType, s3.Private, s3.Options{})
207 c.Check(err, check.IsNil)
// Folder objects are only readable back when S3FolderObjects is
// enabled in the cluster config; otherwise GET must fail.
209 rdr, err := bucket.GetReader(objname)
210 if strings.HasSuffix(trial.path, "/") && !s.testServer.Config.cluster.Collections.S3FolderObjects {
211 c.Check(err, check.NotNil)
213 } else if !c.Check(err, check.IsNil) {
216 buf2, err := ioutil.ReadAll(rdr)
217 c.Check(err, check.IsNil)
218 c.Check(buf2, check.HasLen, len(buf))
219 c.Check(bytes.Equal(buf, buf2), check.Equals, true)
// TestS3ProjectPutObjectNotSupported verifies that PUT directly into a
// project bucket (i.e. a key not inside an existing collection) is
// rejected with 400 Bad Request and leaves no object behind.
// NOTE(review): this excerpt is elided — trial path/size fields and
// closing braces are not visible here.
223 func (s *IntegrationSuite) TestS3ProjectPutObjectNotSupported(c *check.C) {
224 stage := s.s3setup(c)
225 defer stage.teardown(c)
226 bucket := stage.projbucket
228 for _, trial := range []struct {
236 contentType: "application/octet-stream",
238 path: "newdir/newfile",
240 contentType: "application/octet-stream",
244 contentType: "application/x-directory",
247 c.Logf("=== %v", trial)
// Precondition: target key does not exist.
249 _, err := bucket.GetReader(trial.path)
250 c.Assert(err, check.ErrorMatches, `404 Not Found`)
252 buf := make([]byte, trial.size)
// The PUT itself must be rejected...
255 err = bucket.PutReader(trial.path, bytes.NewReader(buf), int64(len(buf)), trial.contentType, s3.Private, s3.Options{})
256 c.Check(err, check.ErrorMatches, `400 Bad Request`)
// ...and must not have created the object as a side effect.
258 _, err = bucket.GetReader(trial.path)
259 c.Assert(err, check.ErrorMatches, `404 Not Found`)
// TestS3CollectionPutObjectFailure runs the PutObject failure cases
// against the collection-as-bucket view.
263 func (s *IntegrationSuite) TestS3CollectionPutObjectFailure(c *check.C) {
264 stage := s.s3setup(c)
265 defer stage.teardown(c)
266 s.testS3PutObjectFailure(c, stage.collbucket, "")
// TestS3ProjectPutObjectFailure runs the PutObject failure cases against
// the project-as-bucket view (keys prefixed with the collection name).
268 func (s *IntegrationSuite) TestS3ProjectPutObjectFailure(c *check.C) {
269 stage := s.s3setup(c)
270 defer stage.teardown(c)
271 s.testS3PutObjectFailure(c, stage.projbucket, stage.coll.Name+"/")
// testS3PutObjectFailure PUTs to invalid keys (paths that collide with
// existing files/dirs from s3setup, and other bad names elided from this
// view) and expects each to fail with a 400-class error without creating
// an object. Folder objects are disabled first so trailing-slash keys
// count as invalid.
// NOTE(review): this excerpt is elided — most trial entries, the goroutine
// bodies presumably coordinated by wg, and closing braces are not visible.
273 func (s *IntegrationSuite) testS3PutObjectFailure(c *check.C, bucket *s3.Bucket, prefix string) {
274 s.testServer.Config.cluster.Collections.S3FolderObjects = false
275 var wg sync.WaitGroup
276 for _, trial := range []struct {
280 path: "emptyfile/newname", // emptyfile exists, see s3setup()
282 path: "emptyfile/", // emptyfile exists, see s3setup()
284 path: "emptydir", // dir already exists, see s3setup()
307 c.Logf("=== %v", trial)
309 objname := prefix + trial.path
311 buf := make([]byte, 1234)
314 err := bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), "application/octet-stream", s3.Private, s3.Options{})
315 if !c.Check(err, check.ErrorMatches, `400 Bad.*`, check.Commentf("PUT %q should fail", objname)) {
// After a failed PUT, the key must still be absent (skip keys that
// can't be GETted meaningfully, i.e. "" and "/").
319 if objname != "" && objname != "/" {
320 _, err = bucket.GetReader(objname)
321 c.Check(err, check.ErrorMatches, `404 Not Found`, check.Commentf("GET %q should return 404", objname))
// writeBigDirs populates the stage collection with dirs directories
// (dir0, dir1, ...) each containing filesPerDir empty files
// (file0.txt, ...), then syncs the collection filesystem so the data is
// persisted. Used by the listing tests to create predictable large
// key sets.
328 func (stage *s3stage) writeBigDirs(c *check.C, dirs int, filesPerDir int) {
329 fs, err := stage.coll.FileSystem(stage.arv, stage.kc)
330 c.Assert(err, check.IsNil)
331 for d := 0; d < dirs; d++ {
332 dir := fmt.Sprintf("dir%d", d)
333 c.Assert(fs.Mkdir(dir, 0755), check.IsNil)
334 for i := 0; i < filesPerDir; i++ {
335 f, err := fs.OpenFile(fmt.Sprintf("%s/file%d.txt", dir, i), os.O_CREATE|os.O_WRONLY, 0644)
336 c.Assert(err, check.IsNil)
337 c.Assert(f.Close(), check.IsNil)
// Sync flushes the collection filesystem changes to the API server.
340 c.Assert(fs.Sync(), check.IsNil)
// TestS3GetBucketVersioning sends a raw GET ?versioning request to each
// bucket and expects the canonical "versioning not enabled" XML response
// (an empty VersioningConfiguration element), as real S3 returns for a
// bucket that has never had versioning configured.
343 func (s *IntegrationSuite) TestS3GetBucketVersioning(c *check.C) {
344 stage := s.s3setup(c)
345 defer stage.teardown(c)
346 for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
// Hand-rolled request: goamz has no GetBucketVersioning call, so auth
// is set via the AWS header with the token as access key.
347 req, err := http.NewRequest("GET", bucket.URL("/"), nil)
348 req.Header.Set("Authorization", "AWS "+arvadostest.ActiveTokenV2+":none")
349 req.URL.RawQuery = "versioning"
350 resp, err := http.DefaultClient.Do(req)
351 c.Assert(err, check.IsNil)
352 c.Check(resp.Header.Get("Content-Type"), check.Equals, "application/xml")
353 buf, err := ioutil.ReadAll(resp.Body)
354 c.Assert(err, check.IsNil)
355 c.Check(string(buf), check.Equals, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"/>\n")
// TestS3CollectionList lists the collection bucket at several page sizes
// and prefixes, with folder objects both disabled and enabled, and checks
// the total object counts.
// NOTE(review): dirs/filesPerDir declarations are elided from this view.
359 func (s *IntegrationSuite) TestS3CollectionList(c *check.C) {
360 stage := s.s3setup(c)
361 defer stage.teardown(c)
// Unusual but legal range form: the loop index lands in "markers"
// (0 when folder objects are off, 1 when on — doubling as the per-dir
// marker count in the expected totals below) while the bool element is
// assigned straight into the cluster config field.
364 for markers, s.testServer.Config.cluster.Collections.S3FolderObjects = range []bool{false, true} {
367 stage.writeBigDirs(c, dirs, filesPerDir)
368 // Total # objects is:
369 // 2 file entries from s3setup (emptyfile and sailboat.txt)
370 // +1 fake "directory" marker from s3setup (emptydir) (if enabled)
371 // +dirs fake "directory" marker from writeBigDirs (dir0/, dir1/) (if enabled)
372 // +filesPerDir*dirs file entries from writeBigDirs (dir0/file0.txt, etc.)
373 s.testS3List(c, stage.collbucket, "", 4000, markers+2+(filesPerDir+markers)*dirs)
374 s.testS3List(c, stage.collbucket, "", 131, markers+2+(filesPerDir+markers)*dirs)
375 s.testS3List(c, stage.collbucket, "dir0/", 71, filesPerDir+markers)
// testS3List pages through a bucket listing with the given prefix and
// page size, checking that: each page respects the (max 1000) page size,
// the number of pages is bounded, NextMarker/IsTruncated are consistent,
// sailboat.txt reports its 4-byte size, and the total distinct key count
// matches expectFiles.
// NOTE(review): this excerpt is elided — the nextMarker/pages
// declarations, loop header, and break/continue lines are not visible.
378 func (s *IntegrationSuite) testS3List(c *check.C, bucket *s3.Bucket, prefix string, pageSize, expectFiles int) {
379 c.Logf("testS3List: prefix=%q pageSize=%d S3FolderObjects=%v", prefix, pageSize, s.testServer.Config.cluster.Collections.S3FolderObjects)
// S3 caps max-keys at 1000 regardless of the requested page size.
380 expectPageSize := pageSize
381 if expectPageSize > 1000 {
382 expectPageSize = 1000
384 gotKeys := map[string]s3.Key{}
388 resp, err := bucket.List(prefix, "", nextMarker, pageSize)
389 if !c.Check(err, check.IsNil) {
392 c.Check(len(resp.Contents) <= expectPageSize, check.Equals, true)
// Guard against an endless pagination loop: page count can exceed the
// minimum by at most one.
393 if pages++; !c.Check(pages <= (expectFiles/expectPageSize)+1, check.Equals, true) {
396 for _, key := range resp.Contents {
397 gotKeys[key.Key] = key
398 if strings.Contains(key.Key, "sailboat.txt") {
399 c.Check(key.Size, check.Equals, int64(4))
// Last page: no NextMarker. Otherwise NextMarker must be non-empty so
// pagination can continue.
402 if !resp.IsTruncated {
403 c.Check(resp.NextMarker, check.Equals, "")
406 if !c.Check(resp.NextMarker, check.Not(check.Equals), "") {
409 nextMarker = resp.NextMarker
411 c.Check(len(gotKeys), check.Equals, expectFiles)
// TestS3CollectionListRollup runs the delimiter/prefix rollup listing
// checks with folder objects both disabled and enabled (the range loop
// assigns each bool directly into the cluster config field).
414 func (s *IntegrationSuite) TestS3CollectionListRollup(c *check.C) {
415 for _, s.testServer.Config.cluster.Collections.S3FolderObjects = range []bool{false, true} {
416 s.testS3CollectionListRollup(c)
// testS3CollectionListRollup builds a large key set, walks the complete
// flat listing to collect allfiles, validates directory-marker behavior
// against the S3FolderObjects setting, then replays a table of
// prefix/delimiter/marker trials against the server, computing the
// expected keys, common prefixes, NextMarker, and truncation flag from
// allfiles and comparing them to the live response.
// NOTE(review): this excerpt is elided and the function continues past
// the visible end — trial struct fields, maxKeys, several loop/brace
// lines, and the tail of the function are not visible here.
420 func (s *IntegrationSuite) testS3CollectionListRollup(c *check.C) {
421 stage := s.s3setup(c)
422 defer stage.teardown(c)
426 stage.writeBigDirs(c, dirs, filesPerDir)
// "dingbats" adds a zero-length top-level file so the listing has an
// entry sorting before the dir*/ keys.
427 err := stage.collbucket.PutReader("dingbats", &bytes.Buffer{}, 0, "application/octet-stream", s3.Private, s3.Options{})
428 c.Assert(err, check.IsNil)
// Collect the full flat listing (no delimiter), de-duplicating
// consecutive repeats across pages.
429 var allfiles []string
430 for marker := ""; ; {
431 resp, err := stage.collbucket.List("", "", marker, 20000)
432 c.Check(err, check.IsNil)
433 for _, key := range resp.Contents {
434 if len(allfiles) == 0 || allfiles[len(allfiles)-1] != key.Key {
435 allfiles = append(allfiles, key.Key)
438 marker = resp.NextMarker
444 if s.testServer.Config.cluster.Collections.S3FolderObjects {
447 c.Check(allfiles, check.HasLen, dirs*(filesPerDir+markers)+3+markers)
// When folder objects are enabled, every file inside a directory must
// be preceded (in listing order) by that directory's "name/" marker;
// when disabled, no trailing-slash keys may appear at all.
449 gotDirMarker := map[string]bool{}
450 for _, name := range allfiles {
451 isDirMarker := strings.HasSuffix(name, "/")
453 c.Check(isDirMarker, check.Equals, false, check.Commentf("name %q", name))
454 } else if isDirMarker {
455 gotDirMarker[name] = true
456 } else if i := strings.LastIndex(name, "/"); i >= 0 {
457 c.Check(gotDirMarker[name[:i+1]], check.Equals, true, check.Commentf("name %q", name))
458 gotDirMarker[name[:i+1]] = true // skip redundant complaints about this dir marker
// Table of prefix/delimiter/marker trials; expectations are derived
// from allfiles below rather than hard-coded.
462 for _, trial := range []struct {
477 {"dir0", "/", "dir0/file14.txt"}, // no commonprefixes
478 {"", "", "dir0/file14.txt"}, // middle page, skip walking dir1
479 {"", "", "dir1/file14.txt"}, // middle page, skip walking dir0
480 {"", "", "dir1/file498.txt"}, // last page of results
481 {"dir1/file", "", "dir1/file498.txt"}, // last page of results, with prefix
482 {"dir1/file", "/", "dir1/file498.txt"}, // last page of results, with prefix + delimiter
483 {"dir1", "Z", "dir1/file498.txt"}, // delimiter "Z" never appears
484 {"dir2", "/", ""}, // prefix "dir2" does not exist
487 c.Logf("\n\n=== trial %+v markers=%d", trial, markers)
490 resp, err := stage.collbucket.List(trial.prefix, trial.delimiter, trial.marker, maxKeys)
491 c.Check(err, check.IsNil)
492 if resp.IsTruncated && trial.delimiter == "" {
493 // goamz List method fills in the missing
494 // NextMarker field if resp.IsTruncated, so
495 // now we can't really tell whether it was
496 // sent by the server or by goamz. In cases
497 // where it should be empty but isn't, assume
498 // it's goamz's fault.
// Simulate the server: walk allfiles, applying prefix/marker filters,
// rolling keys up into common prefixes on the delimiter, and stopping
// (truncated) once maxKeys entries have been produced.
502 var expectKeys []string
503 var expectPrefixes []string
504 var expectNextMarker string
505 var expectTruncated bool
506 for _, key := range allfiles {
507 full := len(expectKeys)+len(expectPrefixes) >= maxKeys
508 if !strings.HasPrefix(key, trial.prefix) || key < trial.marker {
510 } else if idx := strings.Index(key[len(trial.prefix):], trial.delimiter); trial.delimiter != "" && idx >= 0 {
511 prefix := key[:len(trial.prefix)+idx+1]
512 if len(expectPrefixes) > 0 && expectPrefixes[len(expectPrefixes)-1] == prefix {
513 // same prefix as previous key
515 expectNextMarker = key
516 expectTruncated = true
518 expectPrefixes = append(expectPrefixes, prefix)
521 if trial.delimiter != "" {
522 expectNextMarker = key
524 expectTruncated = true
527 expectKeys = append(expectKeys, key)
// Compare the computed expectations against the live response.
532 for _, key := range resp.Contents {
533 gotKeys = append(gotKeys, key.Key)
535 var gotPrefixes []string
536 for _, prefix := range resp.CommonPrefixes {
537 gotPrefixes = append(gotPrefixes, prefix)
539 commentf := check.Commentf("trial %+v markers=%d", trial, markers)
540 c.Check(gotKeys, check.DeepEquals, expectKeys, commentf)
541 c.Check(gotPrefixes, check.DeepEquals, expectPrefixes, commentf)
542 c.Check(resp.NextMarker, check.Equals, expectNextMarker, commentf)
543 c.Check(resp.IsTruncated, check.Equals, expectTruncated, commentf)
544 c.Logf("=== trial %+v keys %q prefixes %q nextMarker %q", trial, gotKeys, gotPrefixes, resp.NextMarker)