1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
17 "git.arvados.org/arvados.git/sdk/go/arvados"
18 "git.arvados.org/arvados.git/sdk/go/arvadosclient"
19 "git.arvados.org/arvados.git/sdk/go/arvadostest"
20 "git.arvados.org/arvados.git/sdk/go/keepclient"
21 "github.com/AdRoll/goamz/aws"
22 "github.com/AdRoll/goamz/s3"
23 check "gopkg.in/check.v1"
28 ac *arvadosclient.ArvadosClient // legacy API client; used by s3setup to build kc
29 kc *keepclient.KeepClient // keep client used to write test file content
32 coll arvados.Collection // test collection created by s3setup()
// s3setup creates the fixtures shared by the S3 API tests: a
// "keep-web s3 test" project, a collection inside it containing
// emptyfile, emptydir, and sailboat.txt, and goamz S3 bucket handles
// (one rooted at the project, one at the collection) pointing at
// s.testServer.
36 func (s *IntegrationSuite) s3setup(c *check.C) s3stage {
37 var proj arvados.Group
38 var coll arvados.Collection
39 arv := arvados.NewClientFromEnv()
40 arv.AuthToken = arvadostest.ActiveToken
// Create the parent project.
41 err := arv.RequestAndDecode(&proj, "POST", "arvados/v1/groups", nil, map[string]interface{}{
42 "group": map[string]interface{}{
43 "group_class": "project",
44 "name": "keep-web s3 test",
46 "ensure_unique_name": true,
48 c.Assert(err, check.IsNil)
// Create the test collection, pre-seeded (via manifest_text) with an
// empty file and an empty directory; the PutObjectFailure tests rely
// on these existing.
49 err = arv.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{"collection": map[string]interface{}{
50 "owner_uuid": proj.UUID,
51 "name": "keep-web s3 test collection",
52 "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:emptyfile\n./emptydir d41d8cd98f00b204e9800998ecf8427e+0 0:0:.\n",
54 c.Assert(err, check.IsNil)
55 ac, err := arvadosclient.New(arv)
56 c.Assert(err, check.IsNil)
57 kc, err := keepclient.MakeKeepClient(ac)
58 c.Assert(err, check.IsNil)
// Write a non-empty UTF-8 file (sailboat.txt) through the collection
// filesystem so GetObject has real content to fetch.
59 fs, err := coll.FileSystem(arv, kc)
60 c.Assert(err, check.IsNil)
61 f, err := fs.OpenFile("sailboat.txt", os.O_CREATE|os.O_WRONLY, 0644)
62 c.Assert(err, check.IsNil)
63 _, err = f.Write([]byte("⛵\n"))
64 c.Assert(err, check.IsNil)
66 c.Assert(err, check.IsNil)
68 c.Assert(err, check.IsNil)
// Re-fetch the collection record so the returned stage.coll reflects
// the newly written content.
69 err = arv.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+coll.UUID, nil, nil)
70 c.Assert(err, check.IsNil)
// The v2 token serves as both S3 access key and secret key; expiry is
// an hour out so it stays valid for the whole test run.
72 auth := aws.NewAuth(arvadostest.ActiveTokenV2, arvadostest.ActiveTokenV2, "", time.Now().Add(time.Hour))
74 Name: s.testServer.Addr,
75 S3Endpoint: "http://" + s.testServer.Addr,
77 client := s3.New(*auth, region)
// projbucket exposes the project (objects named "collname/path");
// collbucket exposes the collection directly.
83 projbucket: &s3.Bucket{
88 collbucket: &s3.Bucket{
// teardown deletes the fixtures created by s3setup. Uses Check (not
// Assert) so cleanup continues even if one deletion fails.
95 func (stage s3stage) teardown(c *check.C) {
96 if stage.coll.UUID != "" {
97 err := stage.arv.RequestAndDecode(&stage.coll, "DELETE", "arvados/v1/collections/"+stage.coll.UUID, nil, nil)
98 c.Check(err, check.IsNil)
// TestS3CollectionGetObject runs the GetObject checks against the
// collection-rooted bucket (object names have no prefix).
102 func (s *IntegrationSuite) TestS3CollectionGetObject(c *check.C) {
103 stage := s.s3setup(c)
104 defer stage.teardown(c)
105 s.testS3GetObject(c, stage.collbucket, "")
// TestS3ProjectGetObject runs the GetObject checks against the
// project-rooted bucket, where objects are prefixed with the
// collection name.
107 func (s *IntegrationSuite) TestS3ProjectGetObject(c *check.C) {
108 stage := s.s3setup(c)
109 defer stage.teardown(c)
110 s.testS3GetObject(c, stage.projbucket, stage.coll.Name+"/")
// testS3GetObject exercises GetObject under the given bucket/prefix:
// an existing empty file returns zero bytes, a missing key returns
// 404, and a non-empty UTF-8 file returns its exact content.
112 func (s *IntegrationSuite) testS3GetObject(c *check.C, bucket *s3.Bucket, prefix string) {
113 rdr, err := bucket.GetReader(prefix + "emptyfile")
114 c.Assert(err, check.IsNil)
115 buf, err := ioutil.ReadAll(rdr)
116 c.Check(err, check.IsNil)
117 c.Check(len(buf), check.Equals, 0)
119 c.Check(err, check.IsNil)
// missing file: goamz surfaces the server's 404 as an error
121 rdr, err = bucket.GetReader(prefix + "missingfile")
122 c.Check(err, check.ErrorMatches, `404 Not Found`)
// existing file with multibyte UTF-8 content (written by s3setup)
124 rdr, err = bucket.GetReader(prefix + "sailboat.txt")
125 c.Assert(err, check.IsNil)
126 buf, err = ioutil.ReadAll(rdr)
127 c.Check(err, check.IsNil)
128 c.Check(buf, check.DeepEquals, []byte("⛵\n"))
130 c.Check(err, check.IsNil)
// TestS3CollectionPutObjectSuccess runs the successful-PUT checks
// against the collection-rooted bucket (no prefix).
133 func (s *IntegrationSuite) TestS3CollectionPutObjectSuccess(c *check.C) {
134 stage := s.s3setup(c)
135 defer stage.teardown(c)
136 s.testS3PutObjectSuccess(c, stage.collbucket, "")
// TestS3ProjectPutObjectSuccess runs the successful-PUT checks
// against the project-rooted bucket (collection-name prefix).
138 func (s *IntegrationSuite) TestS3ProjectPutObjectSuccess(c *check.C) {
139 stage := s.s3setup(c)
140 defer stage.teardown(c)
141 s.testS3PutObjectSuccess(c, stage.projbucket, stage.coll.Name+"/")
// testS3PutObjectSuccess PUTs objects at paths that should succeed
// (including paths in directories that don't exist yet, which the
// server is expected to create implicitly) and verifies each object
// can then be read back byte-for-byte.
143 func (s *IntegrationSuite) testS3PutObjectSuccess(c *check.C, bucket *s3.Bucket, prefix string) {
144 for _, trial := range []struct {
152 path: "newdir/newfile",
155 path: "newdir1/newdir2/newfile",
159 c.Logf("=== %v", trial)
161 objname := prefix + trial.path
// The object must not exist before the PUT.
163 _, err := bucket.GetReader(objname)
164 c.Assert(err, check.ErrorMatches, `404 Not Found`)
// trial.size presumably controls the payload size — elided lines
// between here and the PutReader call appear to fill buf; verify.
166 buf := make([]byte, trial.size)
169 err = bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), "application/octet-stream", s3.Private, s3.Options{})
170 c.Check(err, check.IsNil)
// Read the object back and confirm it round-trips exactly.
172 rdr, err := bucket.GetReader(objname)
173 if !c.Check(err, check.IsNil) {
176 buf2, err := ioutil.ReadAll(rdr)
177 c.Check(err, check.IsNil)
178 c.Check(buf2, check.HasLen, len(buf))
179 c.Check(bytes.Equal(buf, buf2), check.Equals, true)
// TestS3CollectionPutObjectFailure runs the failing-PUT checks
// against the collection-rooted bucket (no prefix).
183 func (s *IntegrationSuite) TestS3CollectionPutObjectFailure(c *check.C) {
184 stage := s.s3setup(c)
185 defer stage.teardown(c)
186 s.testS3PutObjectFailure(c, stage.collbucket, "")
// TestS3ProjectPutObjectFailure runs the failing-PUT checks against
// the project-rooted bucket (collection-name prefix).
188 func (s *IntegrationSuite) TestS3ProjectPutObjectFailure(c *check.C) {
189 stage := s.s3setup(c)
190 defer stage.teardown(c)
191 s.testS3PutObjectFailure(c, stage.projbucket, stage.coll.Name+"/")
// testS3PutObjectFailure PUTs objects at paths that must be rejected
// with a 400 — e.g. paths that collide with an existing file or
// directory — and confirms the failed PUT left nothing behind (GET
// still returns 404).
193 func (s *IntegrationSuite) testS3PutObjectFailure(c *check.C, bucket *s3.Bucket, prefix string) {
// wg is presumably used by elided code to run the trials
// concurrently and wait for them — TODO confirm against full source.
194 var wg sync.WaitGroup
195 for _, trial := range []struct {
199 path: "emptyfile/newname", // emptyfile exists, see s3setup()
201 path: "emptyfile/", // emptyfile exists, see s3setup()
203 path: "emptydir", // dir already exists, see s3setup()
226 c.Logf("=== %v", trial)
228 objname := prefix + trial.path
230 buf := make([]byte, 1234)
// Each of these PUTs must fail with a 400-class error.
233 err := bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), "application/octet-stream", s3.Private, s3.Options{})
234 if !c.Check(err, check.ErrorMatches, `400 Bad.*`, check.Commentf("PUT %q should fail", objname)) {
// A failed PUT must not create the object (skip degenerate names
// that can't be GETted meaningfully).
238 if objname != "" && objname != "/" {
239 _, err = bucket.GetReader(objname)
240 c.Check(err, check.ErrorMatches, `404 Not Found`, check.Commentf("GET %q should return 404", objname))
// writeBigDirs populates stage.coll with dirs directories named
// dir0..dirN, each containing filesPerDir empty files named
// file<i>.txt, then syncs the collection filesystem. Used to produce
// enough entries to exercise List pagination.
247 func (stage *s3stage) writeBigDirs(c *check.C, dirs int, filesPerDir int) {
248 fs, err := stage.coll.FileSystem(stage.arv, stage.kc)
249 c.Assert(err, check.IsNil)
250 for d := 0; d < dirs; d++ {
251 dir := fmt.Sprintf("dir%d", d)
252 c.Assert(fs.Mkdir(dir, 0755), check.IsNil)
253 for i := 0; i < filesPerDir; i++ {
254 f, err := fs.OpenFile(fmt.Sprintf("%s/file%d.txt", dir, i), os.O_CREATE|os.O_WRONLY, 0644)
255 c.Assert(err, check.IsNil)
256 c.Assert(f.Close(), check.IsNil)
// Sync once at the end so all new dirs/files land in one update.
259 c.Assert(fs.Sync(), check.IsNil)
// TestS3CollectionList checks List pagination on a collection bucket
// at several page sizes, including one larger than the server's
// maximum, and with/without a directory prefix. The expected counts
// include the 2 fixture entries from s3setup plus the generated
// dir0/dir1 files.
262 func (s *IntegrationSuite) TestS3CollectionList(c *check.C) {
263 stage := s.s3setup(c)
264 defer stage.teardown(c)
267 stage.writeBigDirs(c, 2, filesPerDir)
268 s.testS3List(c, stage.collbucket, "", 4000, 2+filesPerDir*2)
269 s.testS3List(c, stage.collbucket, "", 131, 2+filesPerDir*2)
270 s.testS3List(c, stage.collbucket, "dir0/", 71, filesPerDir)
// testS3List pages through bucket.List with the given prefix and page
// size, checking that no page exceeds the effective page size, the
// page count is consistent with the expected total, NextMarker /
// IsTruncated agree, and exactly expectFiles distinct keys come back.
272 func (s *IntegrationSuite) testS3List(c *check.C, bucket *s3.Bucket, prefix string, pageSize, expectFiles int) {
// Page sizes above 1000 are expected to be clamped to 1000 by the
// server (S3's max-keys cap).
273 expectPageSize := pageSize
274 if expectPageSize > 1000 {
275 expectPageSize = 1000
277 gotKeys := map[string]s3.Key{}
281 resp, err := bucket.List(prefix, "", nextMarker, pageSize)
282 if !c.Check(err, check.IsNil) {
285 c.Check(len(resp.Contents) <= expectPageSize, check.Equals, true)
// Guard against an endless pagination loop: we should never need
// more pages than expectFiles/expectPageSize (+1 for the remainder).
286 if pages++; !c.Check(pages <= (expectFiles/expectPageSize)+1, check.Equals, true) {
289 for _, key := range resp.Contents {
290 gotKeys[key.Key] = key
// On the final page NextMarker must be empty; on truncated pages it
// must be non-empty so we can continue.
292 if !resp.IsTruncated {
293 c.Check(resp.NextMarker, check.Equals, "")
296 if !c.Check(resp.NextMarker, check.Not(check.Equals), "") {
299 nextMarker = resp.NextMarker
301 c.Check(len(gotKeys), check.Equals, expectFiles)
// TestS3CollectionListRollup checks List's prefix/delimiter/marker
// handling (CommonPrefixes rollup) by first collecting the full flat
// key list, then computing the expected keys/prefixes/NextMarker/
// IsTruncated for each trial locally and comparing against the
// server's response.
304 func (s *IntegrationSuite) TestS3CollectionListRollup(c *check.C) {
305 stage := s.s3setup(c)
306 defer stage.teardown(c)
310 stage.writeBigDirs(c, dirs, filesPerDir)
// Add one top-level object so the listing isn't only directories.
311 err := stage.collbucket.PutReader("dingbats", &bytes.Buffer{}, 0, "application/octet-stream", s3.Private, s3.Options{})
312 c.Assert(err, check.IsNil)
// First, walk the whole bucket (no delimiter) to get the canonical
// sorted list of all keys, deduplicating page-boundary repeats.
313 var allfiles []string
314 for marker := ""; ; {
315 resp, err := stage.collbucket.List("", "", marker, 20000)
316 c.Check(err, check.IsNil)
317 for _, key := range resp.Contents {
318 if len(allfiles) == 0 || allfiles[len(allfiles)-1] != key.Key {
319 allfiles = append(allfiles, key.Key)
322 marker = resp.NextMarker
// +3 = the two s3setup fixtures plus "dingbats" — TODO confirm
// against the elided fixture count.
327 c.Check(allfiles, check.HasLen, dirs*filesPerDir+3)
329 for _, trial := range []struct {
338 {"dir0", "/", "dir0/file14.txt"}, // no commonprefixes
339 {"", "", "dir0/file14.txt"}, // middle page, skip walking dir1
340 {"", "", "dir1/file14.txt"}, // middle page, skip walking dir0
341 {"", "", "dir1/file498.txt"}, // last page of results
342 {"dir1/file", "", "dir1/file498.txt"}, // last page of results, with prefix
343 {"dir1/file", "/", "dir1/file498.txt"}, // last page of results, with prefix + delimiter
344 {"dir1", "Z", "dir1/file498.txt"}, // delimiter "Z" never appears
345 {"dir2", "/", ""}, // prefix "dir2" does not exist
348 c.Logf("\n\n=== trial %+v", trial)
351 resp, err := stage.collbucket.List(trial.prefix, trial.delimiter, trial.marker, maxKeys)
352 c.Check(err, check.IsNil)
353 if resp.IsTruncated && trial.delimiter == "" {
354 // goamz List method fills in the missing
355 // NextMarker field if resp.IsTruncated, so
356 // now we can't really tell whether it was
357 // sent by the server or by goamz. In cases
358 // where it should be empty but isn't, assume
359 // it's goamz's fault.
// Compute the expected response locally from allfiles, applying the
// same prefix/marker filtering and delimiter rollup the server does.
363 var expectKeys []string
364 var expectPrefixes []string
365 var expectNextMarker string
366 var expectTruncated bool
367 for _, key := range allfiles {
// full: the page already holds maxKeys entries (keys + rolled-up
// prefixes combined), so any further match means truncation.
368 full := len(expectKeys)+len(expectPrefixes) >= maxKeys
369 if !strings.HasPrefix(key, trial.prefix) || key < trial.marker {
// Delimiter found after the prefix: this key rolls up into a common
// prefix instead of being listed individually.
371 } else if idx := strings.Index(key[len(trial.prefix):], trial.delimiter); trial.delimiter != "" && idx >= 0 {
372 prefix := key[:len(trial.prefix)+idx+1]
373 if len(expectPrefixes) > 0 && expectPrefixes[len(expectPrefixes)-1] == prefix {
374 // same prefix as previous key
376 expectNextMarker = key
377 expectTruncated = true
379 expectPrefixes = append(expectPrefixes, prefix)
382 if trial.delimiter != "" {
383 expectNextMarker = key
385 expectTruncated = true
388 expectKeys = append(expectKeys, key)
// Flatten the actual response and compare with the expectation.
393 for _, key := range resp.Contents {
394 gotKeys = append(gotKeys, key.Key)
396 var gotPrefixes []string
397 for _, prefix := range resp.CommonPrefixes {
398 gotPrefixes = append(gotPrefixes, prefix)
400 c.Check(gotKeys, check.DeepEquals, expectKeys)
401 c.Check(gotPrefixes, check.DeepEquals, expectPrefixes)
402 c.Check(resp.NextMarker, check.Equals, expectNextMarker)
403 c.Check(resp.IsTruncated, check.Equals, expectTruncated)
404 c.Logf("=== trial %+v keys %q prefixes %q nextMarker %q", trial, gotKeys, gotPrefixes, resp.NextMarker)