1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
18 "git.arvados.org/arvados.git/sdk/go/arvados"
19 "git.arvados.org/arvados.git/sdk/go/arvadosclient"
20 "git.arvados.org/arvados.git/sdk/go/arvadostest"
21 "git.arvados.org/arvados.git/sdk/go/keepclient"
22 "github.com/AdRoll/goamz/aws"
23 "github.com/AdRoll/goamz/s3"
24 check "gopkg.in/check.v1"
// Fields of the s3stage test-fixture struct. NOTE(review): the
// surrounding type declaration and several sibling fields (arv, proj,
// collbucket, projbucket — referenced by the methods below) are not
// visible in this excerpt of the listing.
29 ac *arvadosclient.ArvadosClient // legacy API client, used to build the Keep client
30 kc *keepclient.KeepClient // Keep client for reading/writing collection file data
33 coll arvados.Collection // test collection created by s3setup
// s3setup builds the fixture for the S3 API tests: a fresh "keep-web
// s3 test" project, a collection inside it containing emptyfile,
// emptydir, and sailboat.txt, and goamz S3 clients addressed at the
// test server, using the active user's v2 token as both S3 access key
// and secret. NOTE(review): several lines of this function are missing
// from the listing (file close/sync, region literal, and the returned
// s3stage literal are only partially visible).
37 func (s *IntegrationSuite) s3setup(c *check.C) s3stage {
38 var proj arvados.Group
39 var coll arvados.Collection
40 arv := arvados.NewClientFromEnv()
41 arv.AuthToken = arvadostest.ActiveToken
// Create a uniquely-named project to own the test collection.
42 err := arv.RequestAndDecode(&proj, "POST", "arvados/v1/groups", nil, map[string]interface{}{
43 "group": map[string]interface{}{
44 "group_class": "project",
45 "name": "keep-web s3 test",
47 "ensure_unique_name": true,
49 c.Assert(err, check.IsNil)
// Create the collection with a zero-length file ("emptyfile") and an
// empty directory ("emptydir") pre-populated via manifest_text.
50 err = arv.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{"collection": map[string]interface{}{
51 "owner_uuid": proj.UUID,
52 "name": "keep-web s3 test collection",
53 "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:emptyfile\n./emptydir d41d8cd98f00b204e9800998ecf8427e+0 0:0:.\n",
55 c.Assert(err, check.IsNil)
56 ac, err := arvadosclient.New(arv)
57 c.Assert(err, check.IsNil)
58 kc, err := keepclient.MakeKeepClient(ac)
59 c.Assert(err, check.IsNil)
// Write a small non-empty file (4 bytes: the UTF-8 sailboat rune plus
// newline) through the collection filesystem.
60 fs, err := coll.FileSystem(arv, kc)
61 c.Assert(err, check.IsNil)
62 f, err := fs.OpenFile("sailboat.txt", os.O_CREATE|os.O_WRONLY, 0644)
63 c.Assert(err, check.IsNil)
64 _, err = f.Write([]byte("⛵\n"))
65 c.Assert(err, check.IsNil)
// NOTE(review): the two asserts below check calls whose lines are
// missing from the listing — presumably f.Close() and fs.Sync().
67 c.Assert(err, check.IsNil)
69 c.Assert(err, check.IsNil)
// Re-fetch the collection so the returned fixture carries the
// up-to-date record (e.g. portable_data_hash) after the writes.
70 err = arv.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+coll.UUID, nil, nil)
71 c.Assert(err, check.IsNil)
// S3 credentials: the Arvados v2 token doubles as access key and
// secret; expiry is set an hour out.
73 auth := aws.NewAuth(arvadostest.ActiveTokenV2, arvadostest.ActiveTokenV2, "", time.Now().Add(time.Hour))
// Region pointing at the local keep-web test server (the aws.Region
// literal's opening line is missing from the listing).
75 Name: s.testServer.Addr,
76 S3Endpoint: "http://" + s.testServer.Addr,
78 client := s3.New(*auth, region)
// Returned s3stage literal (mostly missing from the listing): one
// bucket per addressing mode — by project and by collection.
84 projbucket: &s3.Bucket{
89 collbucket: &s3.Bucket{
// teardown deletes the fixture collection and project created by
// s3setup. Each delete is guarded by a UUID check so teardown is safe
// to call even if setup only partially succeeded; failures are
// reported with Check (non-fatal) so both deletes are always
// attempted.
96 func (stage s3stage) teardown(c *check.C) {
97 if stage.coll.UUID != "" {
98 err := stage.arv.RequestAndDecode(&stage.coll, "DELETE", "arvados/v1/collections/"+stage.coll.UUID, nil, nil)
99 c.Check(err, check.IsNil)
101 if stage.proj.UUID != "" {
102 err := stage.arv.RequestAndDecode(&stage.proj, "DELETE", "arvados/v1/groups/"+stage.proj.UUID, nil, nil)
103 c.Check(err, check.IsNil)
// TestS3HeadBucket verifies that a HEAD request on both the
// collection-addressed and project-addressed buckets reports the
// bucket as existing.
107 func (s *IntegrationSuite) TestS3HeadBucket(c *check.C) {
108 stage := s.s3setup(c)
109 defer stage.teardown(c)
111 for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
112 c.Logf("bucket %s", bucket.Name)
// Exists("") issues a HEAD on the bucket root.
113 exists, err := bucket.Exists("")
114 c.Check(err, check.IsNil)
115 c.Check(exists, check.Equals, true)
// TestS3CollectionGetObject runs the GetObject checks against the
// collection-addressed bucket (objects at the bucket root, so no key
// prefix).
119 func (s *IntegrationSuite) TestS3CollectionGetObject(c *check.C) {
120 stage := s.s3setup(c)
121 defer stage.teardown(c)
122 s.testS3GetObject(c, stage.collbucket, "")
// TestS3ProjectGetObject runs the GetObject checks against the
// project-addressed bucket, where objects live under a
// "{collection name}/" key prefix.
124 func (s *IntegrationSuite) TestS3ProjectGetObject(c *check.C) {
125 stage := s.s3setup(c)
126 defer stage.teardown(c)
127 s.testS3GetObject(c, stage.projbucket, stage.coll.Name+"/")
// testS3GetObject exercises GET/HEAD on the fixture objects: an
// empty file reads back as zero bytes, a missing key yields 404 (but
// Exists reports false without error), and sailboat.txt reads back
// with its exact content. NOTE(review): a few lines (reader Close
// calls, judging by the orphaned error checks) are missing from this
// listing.
129 func (s *IntegrationSuite) testS3GetObject(c *check.C, bucket *s3.Bucket, prefix string) {
// Empty file: GET succeeds and body is zero-length.
130 rdr, err := bucket.GetReader(prefix + "emptyfile")
131 c.Assert(err, check.IsNil)
132 buf, err := ioutil.ReadAll(rdr)
133 c.Check(err, check.IsNil)
134 c.Check(len(buf), check.Equals, 0)
136 c.Check(err, check.IsNil)
// Missing file: GET returns 404, but a HEAD-based existence check
// succeeds with exists == false.
139 rdr, err = bucket.GetReader(prefix + "missingfile")
140 c.Check(err, check.ErrorMatches, `404 Not Found`)
143 exists, err := bucket.Exists(prefix + "missingfile")
144 c.Check(err, check.IsNil)
145 c.Check(exists, check.Equals, false)
// Non-empty file: exact bytes written by s3setup come back.
148 rdr, err = bucket.GetReader(prefix + "sailboat.txt")
149 c.Assert(err, check.IsNil)
150 buf, err = ioutil.ReadAll(rdr)
151 c.Check(err, check.IsNil)
152 c.Check(buf, check.DeepEquals, []byte("⛵\n"))
154 c.Check(err, check.IsNil)
157 exists, err = bucket.Exists(prefix + "sailboat.txt")
158 c.Check(err, check.IsNil)
159 c.Check(exists, check.Equals, true)
// TestS3CollectionPutObjectSuccess runs the successful-PUT checks
// against the collection-addressed bucket (no key prefix).
162 func (s *IntegrationSuite) TestS3CollectionPutObjectSuccess(c *check.C) {
163 stage := s.s3setup(c)
164 defer stage.teardown(c)
165 s.testS3PutObjectSuccess(c, stage.collbucket, "")
// TestS3ProjectPutObjectSuccess runs the successful-PUT checks
// against the project-addressed bucket, under the collection-name
// key prefix.
167 func (s *IntegrationSuite) TestS3ProjectPutObjectSuccess(c *check.C) {
168 stage := s.s3setup(c)
169 defer stage.teardown(c)
170 s.testS3PutObjectSuccess(c, stage.projbucket, stage.coll.Name+"/")
// testS3PutObjectSuccess PUTs new objects — plain files, files in
// new nested directories, and a trailing-slash "folder object" — and
// confirms each can be read back with the bytes that were written.
// NOTE(review): many lines are missing from this listing, including
// the trial struct's field declarations, some trial entries, the
// loop header, and the closing braces.
172 func (s *IntegrationSuite) testS3PutObjectSuccess(c *check.C, bucket *s3.Bucket, prefix string) {
// Table of PUT trials; the visible fields are path, (elided) size,
// and contentType. "application/x-directory" marks a folder object.
173 for _, trial := range []struct {
181 contentType: "application/octet-stream",
183 path: "newdir/newfile",
185 contentType: "application/octet-stream",
187 path: "newdir1/newdir2/newfile",
189 contentType: "application/octet-stream",
// Trailing "/" — a zero-content folder object.
191 path: "newdir1/newdir2/newdir3/",
193 contentType: "application/x-directory",
196 c.Logf("=== %v", trial)
198 objname := prefix + trial.path
// Precondition: the object must not exist yet.
200 _, err := bucket.GetReader(objname)
201 c.Assert(err, check.ErrorMatches, `404 Not Found`)
// PUT a buffer of trial.size bytes (fill step, if any, is missing
// from the listing).
203 buf := make([]byte, trial.size)
206 err = bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), trial.contentType, s3.Private, s3.Options{})
207 c.Check(err, check.IsNil)
// Read back. A folder object is only GETtable when the cluster's
// S3FolderObjects option is enabled; otherwise GET must fail.
209 rdr, err := bucket.GetReader(objname)
210 if strings.HasSuffix(trial.path, "/") && !s.testServer.Config.cluster.Collections.S3FolderObjects {
211 c.Check(err, check.NotNil)
213 } else if !c.Check(err, check.IsNil) {
216 buf2, err := ioutil.ReadAll(rdr)
217 c.Check(err, check.IsNil)
218 c.Check(buf2, check.HasLen, len(buf))
219 c.Check(bytes.Equal(buf, buf2), check.Equals, true)
// TestS3CollectionPutObjectFailure runs the failing-PUT checks
// against the collection-addressed bucket (no key prefix).
223 func (s *IntegrationSuite) TestS3CollectionPutObjectFailure(c *check.C) {
224 stage := s.s3setup(c)
225 defer stage.teardown(c)
226 s.testS3PutObjectFailure(c, stage.collbucket, "")
// TestS3ProjectPutObjectFailure runs the failing-PUT checks against
// the project-addressed bucket, under the collection-name key prefix.
228 func (s *IntegrationSuite) TestS3ProjectPutObjectFailure(c *check.C) {
229 stage := s.s3setup(c)
230 defer stage.teardown(c)
231 s.testS3PutObjectFailure(c, stage.projbucket, stage.coll.Name+"/")
// testS3PutObjectFailure PUTs keys that must be rejected with 400 —
// e.g. a key that treats an existing file as a directory, or a
// folder object while S3FolderObjects is disabled — and then
// confirms nothing was created at the key. NOTE(review): most of the
// trial table, the goroutine/WaitGroup wiring implied by the sync
// import, and the closing braces are missing from this listing.
233 func (s *IntegrationSuite) testS3PutObjectFailure(c *check.C, bucket *s3.Bucket, prefix string) {
// Folder objects are disabled for this test so folder-object PUTs
// count as failures.
234 s.testServer.Config.cluster.Collections.S3FolderObjects = false
235 var wg sync.WaitGroup
236 for _, trial := range []struct {
240 path: "emptyfile/newname", // emptyfile exists, see s3setup()
242 path: "emptyfile/", // emptyfile exists, see s3setup()
244 path: "emptydir", // dir already exists, see s3setup()
267 c.Logf("=== %v", trial)
269 objname := prefix + trial.path
271 buf := make([]byte, 1234)
// The PUT itself must fail with a 400-class error.
274 err := bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), "application/octet-stream", s3.Private, s3.Options{})
275 if !c.Check(err, check.ErrorMatches, `400 Bad.*`, check.Commentf("PUT %q should fail", objname)) {
// And the failed PUT must not have created an object (skip the GET
// for degenerate key names that address the bucket itself).
279 if objname != "" && objname != "/" {
280 _, err = bucket.GetReader(objname)
281 c.Check(err, check.ErrorMatches, `404 Not Found`, check.Commentf("GET %q should return 404", objname))
// writeBigDirs populates the fixture collection with dirs
// directories ("dir0", "dir1", ...), each holding filesPerDir empty
// files ("file0.txt", ...), then syncs the collection — used to
// produce enough objects to exercise list pagination.
288 func (stage *s3stage) writeBigDirs(c *check.C, dirs int, filesPerDir int) {
289 fs, err := stage.coll.FileSystem(stage.arv, stage.kc)
290 c.Assert(err, check.IsNil)
291 for d := 0; d < dirs; d++ {
292 dir := fmt.Sprintf("dir%d", d)
293 c.Assert(fs.Mkdir(dir, 0755), check.IsNil)
294 for i := 0; i < filesPerDir; i++ {
// Create-and-close is enough: only the name matters for listing.
295 f, err := fs.OpenFile(fmt.Sprintf("%s/file%d.txt", dir, i), os.O_CREATE|os.O_WRONLY, 0644)
296 c.Assert(err, check.IsNil)
297 c.Assert(f.Close(), check.IsNil)
// Persist all the new entries to the collection record.
300 c.Assert(fs.Sync(), check.IsNil)
// TestS3GetBucketVersioning checks the GET-bucket-versioning
// endpoint ("?versioning") on both buckets: it must return an XML
// VersioningConfiguration document with no Status element (i.e.
// versioning unsupported/disabled). The request is made with raw
// net/http because goamz has no helper for this call; auth is the
// "AWS accesskey:signature" header form with the v2 token as access
// key.
303 func (s *IntegrationSuite) TestS3GetBucketVersioning(c *check.C) {
304 stage := s.s3setup(c)
305 defer stage.teardown(c)
306 for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
// NOTE(review): err from NewRequest is shadowed by the Do call below
// without being checked — harmless here but worth confirming.
307 req, err := http.NewRequest("GET", bucket.URL("/"), nil)
308 req.Header.Set("Authorization", "AWS "+arvadostest.ActiveTokenV2+":none")
309 req.URL.RawQuery = "versioning"
310 resp, err := http.DefaultClient.Do(req)
311 c.Assert(err, check.IsNil)
312 c.Check(resp.Header.Get("Content-Type"), check.Equals, "application/xml")
313 buf, err := ioutil.ReadAll(resp.Body)
314 c.Assert(err, check.IsNil)
315 c.Check(string(buf), check.Equals, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"/>\n")
// TestS3CollectionList checks object-count totals for bucket listing
// with several page sizes, with S3FolderObjects both off and on
// (markers is the 0/1 contribution each synthetic "directory" object
// makes to the expected count). NOTE(review): the declarations of
// markers/dirs/filesPerDir and the loop's closing braces are missing
// from this listing.
319 func (s *IntegrationSuite) TestS3CollectionList(c *check.C) {
320 stage := s.s3setup(c)
321 defer stage.teardown(c)
// Unusual but valid Go: the range clause assigns the folder-objects
// config flag directly on each iteration; markers is presumably set
// to 0/1 accordingly in elided lines.
324 for markers, s.testServer.Config.cluster.Collections.S3FolderObjects = range []bool{false, true} {
327 stage.writeBigDirs(c, dirs, filesPerDir)
328 // Total # objects is:
329 // 2 file entries from s3setup (emptyfile and sailboat.txt)
330 // +1 fake "directory" marker from s3setup (emptydir) (if enabled)
331 // +dirs fake "directory" marker from writeBigDirs (dir0/, dir1/) (if enabled)
332 // +filesPerDir*dirs file entries from writeBigDirs (dir0/file0.txt, etc.)
333 s.testS3List(c, stage.collbucket, "", 4000, markers+2+(filesPerDir+markers)*dirs)
334 s.testS3List(c, stage.collbucket, "", 131, markers+2+(filesPerDir+markers)*dirs)
335 s.testS3List(c, stage.collbucket, "dir0/", 71, filesPerDir+markers)
// testS3List pages through a bucket listing with the given prefix
// and page size, checking that each page respects the (server-capped
// at 1000) page size, that pagination terminates within the expected
// number of pages, that sailboat.txt reports its true 4-byte size,
// and that the union of all pages has exactly expectFiles keys.
// NOTE(review): the declarations of nextMarker/pages, the paging
// loop's header, and several closing/break lines are missing from
// this listing.
338 func (s *IntegrationSuite) testS3List(c *check.C, bucket *s3.Bucket, prefix string, pageSize, expectFiles int) {
339 c.Logf("testS3List: prefix=%q pageSize=%d S3FolderObjects=%v", prefix, pageSize, s.testServer.Config.cluster.Collections.S3FolderObjects)
// The S3 API caps max-keys at 1000 regardless of what we ask for.
340 expectPageSize := pageSize
341 if expectPageSize > 1000 {
342 expectPageSize = 1000
344 gotKeys := map[string]s3.Key{}
348 resp, err := bucket.List(prefix, "", nextMarker, pageSize)
349 if !c.Check(err, check.IsNil) {
352 c.Check(len(resp.Contents) <= expectPageSize, check.Equals, true)
// Guard against endless pagination: never more pages than needed.
353 if pages++; !c.Check(pages <= (expectFiles/expectPageSize)+1, check.Equals, true) {
356 for _, key := range resp.Contents {
357 gotKeys[key.Key] = key
358 if strings.Contains(key.Key, "sailboat.txt") {
359 c.Check(key.Size, check.Equals, int64(4))
// On the last page NextMarker must be empty; on earlier pages it
// must be non-empty and becomes the marker for the next request.
362 if !resp.IsTruncated {
363 c.Check(resp.NextMarker, check.Equals, "")
366 if !c.Check(resp.NextMarker, check.Not(check.Equals), "") {
369 nextMarker = resp.NextMarker
371 c.Check(len(gotKeys), check.Equals, expectFiles)
// TestS3CollectionListRollup runs the rollup listing checks once with
// S3FolderObjects disabled and once enabled (the range clause assigns
// the config flag directly).
374 func (s *IntegrationSuite) TestS3CollectionListRollup(c *check.C) {
375 for _, s.testServer.Config.cluster.Collections.S3FolderObjects = range []bool{false, true} {
376 s.testS3CollectionListRollup(c)
// testS3CollectionListRollup checks delimiter/prefix/marker handling
// ("rollup" of keys into CommonPrefixes): it first builds a full,
// de-duplicated key list via plain paging, validates directory-marker
// placement, then replays many prefix/delimiter/marker combinations
// and compares the server's keys, common prefixes, NextMarker, and
// IsTruncated against expectations computed locally from the full
// list. NOTE(review): this listing is heavily gapped (dirs/
// filesPerDir/markers/maxKeys declarations, several loop bodies,
// break/continue lines, and the function's end are missing), and the
// function continues past the end of this excerpt.
380 func (s *IntegrationSuite) testS3CollectionListRollup(c *check.C) {
381 stage := s.s3setup(c)
382 defer stage.teardown(c)
386 stage.writeBigDirs(c, dirs, filesPerDir)
// An extra zero-length object at the bucket root, created through
// the S3 API itself.
387 err := stage.collbucket.PutReader("dingbats", &bytes.Buffer{}, 0, "application/octet-stream", s3.Private, s3.Options{})
388 c.Assert(err, check.IsNil)
// Phase 1: collect every key (skipping adjacent duplicates) with
// ordinary marker-based paging and no delimiter.
389 var allfiles []string
390 for marker := ""; ; {
391 resp, err := stage.collbucket.List("", "", marker, 20000)
392 c.Check(err, check.IsNil)
393 for _, key := range resp.Contents {
394 if len(allfiles) == 0 || allfiles[len(allfiles)-1] != key.Key {
395 allfiles = append(allfiles, key.Key)
398 marker = resp.NextMarker
// Expected total: the writeBigDirs entries plus the three s3setup/
// dingbats root objects, plus directory markers when enabled.
404 if s.testServer.Config.cluster.Collections.S3FolderObjects {
407 c.Check(allfiles, check.HasLen, dirs*(filesPerDir+markers)+3+markers)
// Sanity-check directory markers: when folder objects are disabled
// no "x/" keys may appear; when enabled, every file in a directory
// must be preceded by (or share) that directory's marker.
409 gotDirMarker := map[string]bool{}
410 for _, name := range allfiles {
411 isDirMarker := strings.HasSuffix(name, "/")
413 c.Check(isDirMarker, check.Equals, false, check.Commentf("name %q", name))
414 } else if isDirMarker {
415 gotDirMarker[name] = true
416 } else if i := strings.LastIndex(name, "/"); i >= 0 {
417 c.Check(gotDirMarker[name[:i+1]], check.Equals, true, check.Commentf("name %q", name))
418 gotDirMarker[name[:i+1]] = true // skip redundant complaints about this dir marker
// Phase 2: trial table of {prefix, delimiter, marker} combinations
// (struct field declarations and some entries are missing from this
// listing).
422 for _, trial := range []struct {
437 {"dir0", "/", "dir0/file14.txt"}, // no commonprefixes
438 {"", "", "dir0/file14.txt"}, // middle page, skip walking dir1
439 {"", "", "dir1/file14.txt"}, // middle page, skip walking dir0
440 {"", "", "dir1/file498.txt"}, // last page of results
441 {"dir1/file", "", "dir1/file498.txt"}, // last page of results, with prefix
442 {"dir1/file", "/", "dir1/file498.txt"}, // last page of results, with prefix + delimiter
443 {"dir1", "Z", "dir1/file498.txt"}, // delimiter "Z" never appears
444 {"dir2", "/", ""}, // prefix "dir2" does not exist
447 c.Logf("\n\n=== trial %+v markers=%d", trial, markers)
450 resp, err := stage.collbucket.List(trial.prefix, trial.delimiter, trial.marker, maxKeys)
451 c.Check(err, check.IsNil)
452 if resp.IsTruncated && trial.delimiter == "" {
453 // goamz List method fills in the missing
454 // NextMarker field if resp.IsTruncated, so
455 // now we can't really tell whether it was
456 // sent by the server or by goamz. In cases
457 // where it should be empty but isn't, assume
458 // it's goamz's fault.
// Compute the expected page locally by walking allfiles with the
// same prefix/marker filter and delimiter rollup the server applies.
462 var expectKeys []string
463 var expectPrefixes []string
464 var expectNextMarker string
465 var expectTruncated bool
466 for _, key := range allfiles {
467 full := len(expectKeys)+len(expectPrefixes) >= maxKeys
468 if !strings.HasPrefix(key, trial.prefix) || key < trial.marker {
// Key rolls up into a common prefix (delimiter found after prefix).
470 } else if idx := strings.Index(key[len(trial.prefix):], trial.delimiter); trial.delimiter != "" && idx >= 0 {
471 prefix := key[:len(trial.prefix)+idx+1]
472 if len(expectPrefixes) > 0 && expectPrefixes[len(expectPrefixes)-1] == prefix {
473 // same prefix as previous key
475 expectNextMarker = key
476 expectTruncated = true
478 expectPrefixes = append(expectPrefixes, prefix)
481 if trial.delimiter != "" {
482 expectNextMarker = key
484 expectTruncated = true
487 expectKeys = append(expectKeys, key)
// Compare the server's response against the locally computed
// expectations.
492 for _, key := range resp.Contents {
493 gotKeys = append(gotKeys, key.Key)
495 var gotPrefixes []string
496 for _, prefix := range resp.CommonPrefixes {
497 gotPrefixes = append(gotPrefixes, prefix)
499 commentf := check.Commentf("trial %+v markers=%d", trial, markers)
500 c.Check(gotKeys, check.DeepEquals, expectKeys, commentf)
501 c.Check(gotPrefixes, check.DeepEquals, expectPrefixes, commentf)
502 c.Check(resp.NextMarker, check.Equals, expectNextMarker, commentf)
503 c.Check(resp.IsTruncated, check.Equals, expectTruncated, commentf)
504 c.Logf("=== trial %+v keys %q prefixes %q nextMarker %q", trial, gotKeys, gotPrefixes, resp.NextMarker)