1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
18 "git.arvados.org/arvados.git/sdk/go/arvados"
19 "git.arvados.org/arvados.git/sdk/go/arvadosclient"
20 "git.arvados.org/arvados.git/sdk/go/arvadostest"
21 "git.arvados.org/arvados.git/sdk/go/keepclient"
22 "github.com/AdRoll/goamz/aws"
23 "github.com/AdRoll/goamz/s3"
24 check "gopkg.in/check.v1"
// Fields of the s3stage test fixture. NOTE(review): the struct
// declaration and several sibling fields are on source lines not
// shown in this excerpt; presumably arv, proj, collbucket and
// projbucket fields exist too (they are referenced below) — confirm
// against the full file.
29 ac *arvadosclient.ArvadosClient
30 kc *keepclient.KeepClient
33 coll arvados.Collection
// s3setup builds the fixtures shared by the S3 tests: a throwaway
// project, a collection inside it containing "emptyfile", an empty
// directory "emptydir", and "sailboat.txt", plus goamz S3 bucket
// clients aimed at the keep-web test server.
// NOTE(review): this listing is truncated — several original source
// lines (closing braces, the region/s3stage literals) are missing
// between the numbered lines below.
37 func (s *IntegrationSuite) s3setup(c *check.C) s3stage {
38 var proj arvados.Group
39 var coll arvados.Collection
// All API calls use the "active" fixture user's token.
40 arv := arvados.NewClientFromEnv()
41 arv.AuthToken = arvadostest.ActiveToken
// Create a uniquely-named project to own the test collection.
42 err := arv.RequestAndDecode(&proj, "POST", "arvados/v1/groups", nil, map[string]interface{}{
43 "group": map[string]interface{}{
44 "group_class": "project",
45 "name": "keep-web s3 test",
47 "ensure_unique_name": true,
49 c.Assert(err, check.IsNil)
// Create the collection with an empty file and an empty directory
// (d41d8cd9... is the MD5 of the empty string).
50 err = arv.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{"collection": map[string]interface{}{
51 "owner_uuid": proj.UUID,
52 "name": "keep-web s3 test collection",
53 "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:emptyfile\n./emptydir d41d8cd98f00b204e9800998ecf8427e+0 0:0:.\n",
55 c.Assert(err, check.IsNil)
// Write sailboat.txt (multibyte UTF-8 content) through the
// collection filesystem; the close/sync calls presumably happen on
// the missing lines 66/68 whose error checks survive below.
56 ac, err := arvadosclient.New(arv)
57 c.Assert(err, check.IsNil)
58 kc, err := keepclient.MakeKeepClient(ac)
59 c.Assert(err, check.IsNil)
60 fs, err := coll.FileSystem(arv, kc)
61 c.Assert(err, check.IsNil)
62 f, err := fs.OpenFile("sailboat.txt", os.O_CREATE|os.O_WRONLY, 0644)
63 c.Assert(err, check.IsNil)
64 _, err = f.Write([]byte("⛵\n"))
65 c.Assert(err, check.IsNil)
67 c.Assert(err, check.IsNil)
69 c.Assert(err, check.IsNil)
// Re-fetch the collection record so the returned stage.coll carries
// the updated manifest/PDH.
70 err = arv.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+coll.UUID, nil, nil)
71 c.Assert(err, check.IsNil)
// The Arvados v2 token is sent as both the S3 access key and secret.
73 auth := aws.NewAuth(arvadostest.ActiveTokenV2, arvadostest.ActiveTokenV2, "", time.Now().Add(time.Hour))
75 Name: s.testServer.Addr,
76 S3Endpoint: "http://" + s.testServer.Addr,
78 client := s3.New(*auth, region)
// Two bucket handles: one addressing the parent project, one
// addressing the collection itself (field values on missing lines).
84 projbucket: &s3.Bucket{
89 collbucket: &s3.Bucket{
// teardown deletes the collection and project created by s3setup.
// Fixtures that were never created (empty UUID) are skipped;
// deletion failures are reported via Check but do not abort the test.
96 func (stage s3stage) teardown(c *check.C) {
97 if stage.coll.UUID != "" {
98 err := stage.arv.RequestAndDecode(&stage.coll, "DELETE", "arvados/v1/collections/"+stage.coll.UUID, nil, nil)
99 c.Check(err, check.IsNil)
101 if stage.proj.UUID != "" {
102 err := stage.arv.RequestAndDecode(&stage.proj, "DELETE", "arvados/v1/groups/"+stage.proj.UUID, nil, nil)
103 c.Check(err, check.IsNil)
// TestS3HeadBucket checks that a HEAD-style existence probe succeeds
// for both the collection bucket and the project bucket.
107 func (s *IntegrationSuite) TestS3HeadBucket(c *check.C) {
108 stage := s.s3setup(c)
109 defer stage.teardown(c)
111 for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
112 c.Logf("bucket %s", bucket.Name)
// Exists("") probes the bucket root rather than a specific key.
113 exists, err := bucket.Exists("")
114 c.Check(err, check.IsNil)
115 c.Check(exists, check.Equals, true)
// TestS3CollectionGetObject runs the GET-object checks against the
// collection-addressed bucket (objects at the bucket root).
119 func (s *IntegrationSuite) TestS3CollectionGetObject(c *check.C) {
120 stage := s.s3setup(c)
121 defer stage.teardown(c)
122 s.testS3GetObject(c, stage.collbucket, "")
// TestS3ProjectGetObject runs the same GET-object checks against the
// project-addressed bucket, where objects live under a
// "<collection name>/" key prefix.
124 func (s *IntegrationSuite) TestS3ProjectGetObject(c *check.C) {
125 stage := s.s3setup(c)
126 defer stage.teardown(c)
127 s.testS3GetObject(c, stage.projbucket, stage.coll.Name+"/")
// testS3GetObject exercises object reads through the S3 API:
// an existing zero-length object, a missing object (404 on GET,
// false on Exists), and a non-empty object whose bytes are verified
// exactly. prefix is prepended to every key so the same checks work
// for both collection- and project-addressed buckets.
129 func (s *IntegrationSuite) testS3GetObject(c *check.C, bucket *s3.Bucket, prefix string) {
// Zero-length file reads back as zero bytes.
130 rdr, err := bucket.GetReader(prefix + "emptyfile")
131 c.Assert(err, check.IsNil)
132 buf, err := ioutil.ReadAll(rdr)
133 c.Check(err, check.IsNil)
134 c.Check(len(buf), check.Equals, 0)
136 c.Check(err, check.IsNil)
// Nonexistent key: GET returns 404 and Exists reports false
// without error.
139 rdr, err = bucket.GetReader(prefix + "missingfile")
140 c.Check(err, check.ErrorMatches, `404 Not Found`)
143 exists, err := bucket.Exists(prefix + "missingfile")
144 c.Check(err, check.IsNil)
145 c.Check(exists, check.Equals, false)
// Non-empty file: content matches what s3setup wrote, byte for byte.
148 rdr, err = bucket.GetReader(prefix + "sailboat.txt")
149 c.Assert(err, check.IsNil)
150 buf, err = ioutil.ReadAll(rdr)
151 c.Check(err, check.IsNil)
152 c.Check(buf, check.DeepEquals, []byte("⛵\n"))
154 c.Check(err, check.IsNil)
157 exists, err = bucket.Exists(prefix + "sailboat.txt")
158 c.Check(err, check.IsNil)
159 c.Check(exists, check.Equals, true)
// TestS3CollectionPutObjectSuccess runs the successful-PUT checks
// against the collection-addressed bucket.
162 func (s *IntegrationSuite) TestS3CollectionPutObjectSuccess(c *check.C) {
163 stage := s.s3setup(c)
164 defer stage.teardown(c)
165 s.testS3PutObjectSuccess(c, stage.collbucket, "")
// TestS3ProjectPutObjectSuccess runs the successful-PUT checks
// against the project-addressed bucket, under the collection-name
// key prefix.
167 func (s *IntegrationSuite) TestS3ProjectPutObjectSuccess(c *check.C) {
168 stage := s.s3setup(c)
169 defer stage.teardown(c)
170 s.testS3PutObjectSuccess(c, stage.projbucket, stage.coll.Name+"/")
// testS3PutObjectSuccess uploads objects at various paths (including
// paths that require creating intermediate directories) and verifies
// each round-trips: GET before the PUT 404s, GET after the PUT
// returns the exact uploaded bytes.
// NOTE(review): the trial table is truncated in this excerpt — more
// entries (and a size field) exist on the missing lines.
172 func (s *IntegrationSuite) testS3PutObjectSuccess(c *check.C, bucket *s3.Bucket, prefix string) {
173 for _, trial := range []struct {
181 path: "newdir/newfile",
184 path: "newdir1/newdir2/newfile",
188 c.Logf("=== %v", trial)
190 objname := prefix + trial.path
// Precondition: object must not already exist.
192 _, err := bucket.GetReader(objname)
193 c.Assert(err, check.ErrorMatches, `404 Not Found`)
// Upload trial.size bytes (buffer fill presumably happens on the
// missing lines between 195 and 198).
195 buf := make([]byte, trial.size)
198 err = bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), "application/octet-stream", s3.Private, s3.Options{})
199 c.Check(err, check.IsNil)
// Read back and compare length and content.
201 rdr, err := bucket.GetReader(objname)
202 if !c.Check(err, check.IsNil) {
205 buf2, err := ioutil.ReadAll(rdr)
206 c.Check(err, check.IsNil)
207 c.Check(buf2, check.HasLen, len(buf))
208 c.Check(bytes.Equal(buf, buf2), check.Equals, true)
// TestS3CollectionPutObjectFailure runs the rejected-PUT checks
// against the collection-addressed bucket.
212 func (s *IntegrationSuite) TestS3CollectionPutObjectFailure(c *check.C) {
213 stage := s.s3setup(c)
214 defer stage.teardown(c)
215 s.testS3PutObjectFailure(c, stage.collbucket, "")
// TestS3ProjectPutObjectFailure runs the rejected-PUT checks against
// the project-addressed bucket, under the collection-name key prefix.
217 func (s *IntegrationSuite) TestS3ProjectPutObjectFailure(c *check.C) {
218 stage := s.s3setup(c)
219 defer stage.teardown(c)
220 s.testS3PutObjectFailure(c, stage.projbucket, stage.coll.Name+"/")
// testS3PutObjectFailure attempts PUTs with invalid object names
// (e.g. a path component that collides with an existing file, or a
// trailing slash) and asserts each is rejected with a 400-series
// error and that no object is created as a side effect.
// NOTE(review): the trial table and the goroutine/WaitGroup wiring
// implied by the wg declaration are truncated in this excerpt —
// presumably each trial runs concurrently; confirm in the full file.
222 func (s *IntegrationSuite) testS3PutObjectFailure(c *check.C, bucket *s3.Bucket, prefix string) {
223 var wg sync.WaitGroup
224 for _, trial := range []struct {
228 path: "emptyfile/newname", // emptyfile exists, see s3setup()
230 path: "emptyfile/", // emptyfile exists, see s3setup()
232 path: "emptydir", // dir already exists, see s3setup()
255 c.Logf("=== %v", trial)
257 objname := prefix + trial.path
258 buf := make([]byte, 1234)
// The PUT must fail with a 400-class error.
262 err := bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), "application/octet-stream", s3.Private, s3.Options{})
263 if !c.Check(err, check.ErrorMatches, `400 Bad.*`, check.Commentf("PUT %q should fail", objname)) {
// A failed PUT must not leave a readable object behind (skip keys
// that would address the bucket root).
267 if objname != "" && objname != "/" {
268 _, err = bucket.GetReader(objname)
269 c.Check(err, check.ErrorMatches, `404 Not Found`, check.Commentf("GET %q should return 404", objname))
// writeBigDirs populates the stage collection with dirs directories
// (dir0, dir1, ...) each containing filesPerDir empty files
// (file0.txt, file1.txt, ...), then syncs the filesystem so the new
// manifest is committed. Used to generate enough keys to exercise
// list pagination.
276 func (stage *s3stage) writeBigDirs(c *check.C, dirs int, filesPerDir int) {
277 fs, err := stage.coll.FileSystem(stage.arv, stage.kc)
278 c.Assert(err, check.IsNil)
279 for d := 0; d < dirs; d++ {
280 dir := fmt.Sprintf("dir%d", d)
281 c.Assert(fs.Mkdir(dir, 0755), check.IsNil)
282 for i := 0; i < filesPerDir; i++ {
// Files are created empty: open O_CREATE and close immediately.
283 f, err := fs.OpenFile(fmt.Sprintf("%s/file%d.txt", dir, i), os.O_CREATE|os.O_WRONLY, 0644)
284 c.Assert(err, check.IsNil)
285 c.Assert(f.Close(), check.IsNil)
// Commit all the new entries to the collection in one sync.
288 c.Assert(fs.Sync(), check.IsNil)
// TestS3GetBucketVersioning checks that GET ?versioning on either
// bucket returns an empty VersioningConfiguration document
// (versioning unsupported/disabled).
291 func (s *IntegrationSuite) TestS3GetBucketVersioning(c *check.C) {
292 stage := s.s3setup(c)
293 defer stage.teardown(c)
294 for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
// NOTE(review): the err returned by http.NewRequest is overwritten
// at line 298 before ever being checked.
295 req, err := http.NewRequest("GET", bucket.URL("/"), nil)
// Hand-rolled request so we control the raw query string exactly;
// auth header format is "AWS <token>:<signature>".
296 req.Header.Set("Authorization", "AWS "+arvadostest.ActiveTokenV2+":none")
297 req.URL.RawQuery = "versioning"
298 resp, err := http.DefaultClient.Do(req)
299 c.Assert(err, check.IsNil)
// NOTE(review): resp.Body is never closed in the visible lines.
300 buf, err := ioutil.ReadAll(resp.Body)
301 c.Assert(err, check.IsNil)
302 c.Check(strings.TrimSpace(string(buf)), check.Equals, `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`)
// TestS3CollectionList generates two directories of files and runs
// the paginated-list checks with several page sizes, both at the
// bucket root and under a directory prefix. Expected counts are
// filesPerDir files per dir plus the two fixture files.
// NOTE(review): filesPerDir is declared on lines missing from this
// excerpt.
306 func (s *IntegrationSuite) TestS3CollectionList(c *check.C) {
307 stage := s.s3setup(c)
308 defer stage.teardown(c)
311 stage.writeBigDirs(c, 2, filesPerDir)
312 s.testS3List(c, stage.collbucket, "", 4000, 2+filesPerDir*2)
313 s.testS3List(c, stage.collbucket, "", 131, 2+filesPerDir*2)
314 s.testS3List(c, stage.collbucket, "dir0/", 71, filesPerDir)
// testS3List pages through a bucket listing with the given prefix
// and page size, checking that each page respects the (S3-capped,
// max 1000) page size, that pagination terminates within the
// expected number of pages, that NextMarker is set iff the response
// is truncated, and that exactly expectFiles distinct keys are seen.
316 func (s *IntegrationSuite) testS3List(c *check.C, bucket *s3.Bucket, prefix string, pageSize, expectFiles int) {
// S3 caps max-keys at 1000 regardless of the requested page size.
317 expectPageSize := pageSize
318 if expectPageSize > 1000 {
319 expectPageSize = 1000
// Collect keys across pages; duplicates would shrink the final count.
321 gotKeys := map[string]s3.Key{}
325 resp, err := bucket.List(prefix, "", nextMarker, pageSize)
326 if !c.Check(err, check.IsNil) {
329 c.Check(len(resp.Contents) <= expectPageSize, check.Equals, true)
// Guard against an endless pagination loop.
330 if pages++; !c.Check(pages <= (expectFiles/expectPageSize)+1, check.Equals, true) {
333 for _, key := range resp.Contents {
334 gotKeys[key.Key] = key
// A non-truncated response must not advertise a next marker.
336 if !resp.IsTruncated {
337 c.Check(resp.NextMarker, check.Equals, "")
340 if !c.Check(resp.NextMarker, check.Not(check.Equals), "") {
343 nextMarker = resp.NextMarker
345 c.Check(len(gotKeys), check.Equals, expectFiles)
// TestS3CollectionListRollup checks delimiter/prefix "rollup"
// semantics: it first enumerates every key without a delimiter to
// build the reference list, then, for a table of
// (prefix, delimiter, marker) trials, computes the expected keys,
// CommonPrefixes, NextMarker and IsTruncated by simulating the S3
// rollup rules over that reference list, and compares against the
// server's actual response.
// NOTE(review): this excerpt is truncated at both ends of the
// function — the dirs/filesPerDir/maxKeys declarations, parts of the
// trial table, and the closing lines are on source lines not shown.
348 func (s *IntegrationSuite) TestS3CollectionListRollup(c *check.C) {
349 stage := s.s3setup(c)
350 defer stage.teardown(c)
354 stage.writeBigDirs(c, dirs, filesPerDir)
// Add a zero-length root-level object so the listing has a key that
// sorts before the dir*/ prefixes.
355 err := stage.collbucket.PutReader("dingbats", &bytes.Buffer{}, 0, "application/octet-stream", s3.Private, s3.Options{})
356 c.Assert(err, check.IsNil)
// Build the reference list of all keys by paging with no delimiter.
357 var allfiles []string
358 for marker := ""; ; {
359 resp, err := stage.collbucket.List("", "", marker, 20000)
360 c.Check(err, check.IsNil)
361 for _, key := range resp.Contents {
// Skip duplicates that can appear at page boundaries.
362 if len(allfiles) == 0 || allfiles[len(allfiles)-1] != key.Key {
363 allfiles = append(allfiles, key.Key)
366 marker = resp.NextMarker
// dirs*filesPerDir generated files + emptyfile + sailboat.txt +
// dingbats.
371 c.Check(allfiles, check.HasLen, dirs*filesPerDir+3)
// Trial table: prefix, delimiter, marker combinations (several
// entries are on missing lines).
373 for _, trial := range []struct {
382 {"dir0", "/", "dir0/file14.txt"}, // no commonprefixes
383 {"", "", "dir0/file14.txt"}, // middle page, skip walking dir1
384 {"", "", "dir1/file14.txt"}, // middle page, skip walking dir0
385 {"", "", "dir1/file498.txt"}, // last page of results
386 {"dir1/file", "", "dir1/file498.txt"}, // last page of results, with prefix
387 {"dir1/file", "/", "dir1/file498.txt"}, // last page of results, with prefix + delimiter
388 {"dir1", "Z", "dir1/file498.txt"}, // delimiter "Z" never appears
389 {"dir2", "/", ""}, // prefix "dir2" does not exist
392 c.Logf("\n\n=== trial %+v", trial)
395 resp, err := stage.collbucket.List(trial.prefix, trial.delimiter, trial.marker, maxKeys)
396 c.Check(err, check.IsNil)
397 if resp.IsTruncated && trial.delimiter == "" {
398 // goamz List method fills in the missing
399 // NextMarker field if resp.IsTruncated, so
400 // now we can't really tell whether it was
401 // sent by the server or by goamz. In cases
402 // where it should be empty but isn't, assume
403 // it's goamz's fault.
// Simulate the S3 rollup rules over allfiles to compute what the
// server should have returned for this trial.
407 var expectKeys []string
408 var expectPrefixes []string
409 var expectNextMarker string
410 var expectTruncated bool
411 for _, key := range allfiles {
// full: the page already holds maxKeys entries (keys + prefixes).
412 full := len(expectKeys)+len(expectPrefixes) >= maxKeys
413 if !strings.HasPrefix(key, trial.prefix) || key < trial.marker {
// Key contains the delimiter after the prefix: it rolls up into a
// CommonPrefixes entry instead of appearing as a key.
415 } else if idx := strings.Index(key[len(trial.prefix):], trial.delimiter); trial.delimiter != "" && idx >= 0 {
416 prefix := key[:len(trial.prefix)+idx+1]
417 if len(expectPrefixes) > 0 && expectPrefixes[len(expectPrefixes)-1] == prefix {
418 // same prefix as previous key
420 expectNextMarker = key
421 expectTruncated = true
423 expectPrefixes = append(expectPrefixes, prefix)
426 if trial.delimiter != "" {
427 expectNextMarker = key
429 expectTruncated = true
432 expectKeys = append(expectKeys, key)
// Compare the server's response against the simulation.
437 for _, key := range resp.Contents {
438 gotKeys = append(gotKeys, key.Key)
440 var gotPrefixes []string
441 for _, prefix := range resp.CommonPrefixes {
442 gotPrefixes = append(gotPrefixes, prefix)
444 c.Check(gotKeys, check.DeepEquals, expectKeys)
445 c.Check(gotPrefixes, check.DeepEquals, expectPrefixes)
446 c.Check(resp.NextMarker, check.Equals, expectNextMarker)
447 c.Check(resp.IsTruncated, check.Equals, expectTruncated)
448 c.Logf("=== trial %+v keys %q prefixes %q nextMarker %q", trial, gotKeys, gotPrefixes, resp.NextMarker)