// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package main

import (
        "bytes"
        "crypto/rand"
        "fmt"
        "io/ioutil"
        "os"
        "strings"
        "sync"
        "time"

        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/arvadosclient"
        "git.arvados.org/arvados.git/sdk/go/arvadostest"
        "git.arvados.org/arvados.git/sdk/go/keepclient"
        "github.com/AdRoll/goamz/aws"
        "github.com/AdRoll/goamz/s3"
        check "gopkg.in/check.v1"
)

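// s3stage holds the clients, test project, and test collection shared by the
// S3 gateway tests, plus goamz bucket handles that address the project and
// the collection by UUID.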
type s3stage struct {
        arv        *arvados.Client
        ac         *arvadosclient.ArvadosClient
        kc         *keepclient.KeepClient
        proj       arvados.Group
        projbucket *s3.Bucket
        coll       arvados.Collection
        collbucket *s3.Bucket
}

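// s3setup creates a test project and a collection inside it (containing
// emptyfile, emptydir, and sailboat.txt), then returns an s3stage whose
// buckets point at the keep-web test server, using the active user's token
// as both the S3 access key and secret key.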
func (s *IntegrationSuite) s3setup(c *check.C) s3stage {
        var proj arvados.Group
        var coll arvados.Collection
        arv := arvados.NewClientFromEnv()
        arv.AuthToken = arvadostest.ActiveToken
        err := arv.RequestAndDecode(&proj, "POST", "arvados/v1/groups", nil, map[string]interface{}{
                "group": map[string]interface{}{
                        "group_class": "project",
                        "name":        "keep-web s3 test",
                },
                "ensure_unique_name": true,
        })
        c.Assert(err, check.IsNil)
        err = arv.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{"collection": map[string]interface{}{
                "owner_uuid":    proj.UUID,
                "name":          "keep-web s3 test collection",
                "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:emptyfile\n./emptydir d41d8cd98f00b204e9800998ecf8427e+0 0:0:.\n",
        }})
        c.Assert(err, check.IsNil)
        ac, err := arvadosclient.New(arv)
        c.Assert(err, check.IsNil)
        kc, err := keepclient.MakeKeepClient(ac)
        c.Assert(err, check.IsNil)
        fs, err := coll.FileSystem(arv, kc)
        c.Assert(err, check.IsNil)
        f, err := fs.OpenFile("sailboat.txt", os.O_CREATE|os.O_WRONLY, 0644)
        c.Assert(err, check.IsNil)
        _, err = f.Write([]byte("⛵\n"))
        c.Assert(err, check.IsNil)
        err = f.Close()
        c.Assert(err, check.IsNil)
        err = fs.Sync()
        c.Assert(err, check.IsNil)
        err = arv.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+coll.UUID, nil, nil)
        c.Assert(err, check.IsNil)

        auth := aws.NewAuth(arvadostest.ActiveTokenV2, arvadostest.ActiveTokenV2, "", time.Now().Add(time.Hour))
        region := aws.Region{
                Name:       s.testServer.Addr,
                S3Endpoint: "http://" + s.testServer.Addr,
        }
        client := s3.New(*auth, region)
        return s3stage{
                arv:  arv,
                ac:   ac,
                kc:   kc,
                proj: proj,
                projbucket: &s3.Bucket{
                        S3:   client,
                        Name: proj.UUID,
                },
                coll: coll,
                collbucket: &s3.Bucket{
                        S3:   client,
                        Name: coll.UUID,
                },
        }
}

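// teardown deletes the test collection created by s3setup. The test project
// is left in place.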
func (stage s3stage) teardown(c *check.C) {
        if stage.coll.UUID != "" {
                err := stage.arv.RequestAndDecode(&stage.coll, "DELETE", "arvados/v1/collections/"+stage.coll.UUID, nil, nil)
                c.Check(err, check.IsNil)
        }
}

func (s *IntegrationSuite) TestS3CollectionGetObject(c *check.C) {
        stage := s.s3setup(c)
        defer stage.teardown(c)
        s.testS3GetObject(c, stage.collbucket, "")
}
func (s *IntegrationSuite) TestS3ProjectGetObject(c *check.C) {
        stage := s.s3setup(c)
        defer stage.teardown(c)
        s.testS3GetObject(c, stage.projbucket, stage.coll.Name+"/")
}
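// testS3GetObject fetches an empty file, a nonexistent file (expecting 404),
// and a small UTF-8 file through the given bucket; prefix addresses the
// collection when going through the project bucket.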
func (s *IntegrationSuite) testS3GetObject(c *check.C, bucket *s3.Bucket, prefix string) {
        rdr, err := bucket.GetReader(prefix + "emptyfile")
        c.Assert(err, check.IsNil)
        buf, err := ioutil.ReadAll(rdr)
        c.Check(err, check.IsNil)
        c.Check(len(buf), check.Equals, 0)
        err = rdr.Close()
        c.Check(err, check.IsNil)

        rdr, err = bucket.GetReader(prefix + "missingfile")
        c.Check(err, check.ErrorMatches, `404 Not Found`)

        rdr, err = bucket.GetReader(prefix + "sailboat.txt")
        c.Assert(err, check.IsNil)
        buf, err = ioutil.ReadAll(rdr)
        c.Check(err, check.IsNil)
        c.Check(buf, check.DeepEquals, []byte("⛵\n"))
        err = rdr.Close()
        c.Check(err, check.IsNil)
}

func (s *IntegrationSuite) TestS3CollectionPutObjectSuccess(c *check.C) {
        stage := s.s3setup(c)
        defer stage.teardown(c)
        s.testS3PutObjectSuccess(c, stage.collbucket, "")
}
func (s *IntegrationSuite) TestS3ProjectPutObjectSuccess(c *check.C) {
        stage := s.s3setup(c)
        defer stage.teardown(c)
        s.testS3PutObjectSuccess(c, stage.projbucket, stage.coll.Name+"/")
}
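// testS3PutObjectSuccess writes objects of various sizes (including zero
// bytes and paths that create new intermediate directories), then reads each
// one back and verifies the content round-trips intact.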
func (s *IntegrationSuite) testS3PutObjectSuccess(c *check.C, bucket *s3.Bucket, prefix string) {
        for _, trial := range []struct {
                path string
                size int
        }{
                {
                        path: "newfile",
                        size: 128000000,
                }, {
                        path: "newdir/newfile",
                        size: 1 << 26,
                }, {
                        path: "newdir1/newdir2/newfile",
                        size: 0,
                },
        } {
                c.Logf("=== %v", trial)

                objname := prefix + trial.path

                _, err := bucket.GetReader(objname)
                c.Assert(err, check.ErrorMatches, `404 Not Found`)

                buf := make([]byte, trial.size)
                rand.Read(buf)

                err = bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), "application/octet-stream", s3.Private, s3.Options{})
                c.Check(err, check.IsNil)

                rdr, err := bucket.GetReader(objname)
                if !c.Check(err, check.IsNil) {
                        continue
                }
                buf2, err := ioutil.ReadAll(rdr)
                c.Check(err, check.IsNil)
                c.Check(buf2, check.HasLen, len(buf))
                c.Check(bytes.Equal(buf, buf2), check.Equals, true)
        }
}

func (s *IntegrationSuite) TestS3CollectionPutObjectFailure(c *check.C) {
        stage := s.s3setup(c)
        defer stage.teardown(c)
        s.testS3PutObjectFailure(c, stage.collbucket, "")
}
func (s *IntegrationSuite) TestS3ProjectPutObjectFailure(c *check.C) {
        stage := s.s3setup(c)
        defer stage.teardown(c)
        s.testS3PutObjectFailure(c, stage.projbucket, stage.coll.Name+"/")
}
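// testS3PutObjectFailure issues concurrent PUT requests for object names that
// keep-web must reject (e.g. a path through an existing file, trailing or
// doubled slashes, the empty name), and checks that each request fails with
// 400 and leaves no object behind.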
func (s *IntegrationSuite) testS3PutObjectFailure(c *check.C, bucket *s3.Bucket, prefix string) {
        var wg sync.WaitGroup
        for _, trial := range []struct {
                path string
        }{
                {
                        path: "emptyfile/newname", // emptyfile exists, see s3setup()
                }, {
                        path: "emptyfile/", // emptyfile exists, see s3setup()
                }, {
                        path: "emptydir", // dir already exists, see s3setup()
                }, {
                        path: "emptydir/",
                }, {
                        path: "emptydir//",
                }, {
                        path: "newdir/",
                }, {
                        path: "newdir//",
                }, {
                        path: "/",
                }, {
                        path: "//",
                }, {
                        path: "foo//bar",
                }, {
                        path: "",
                },
        } {
                trial := trial
                wg.Add(1)
                go func() {
                        defer wg.Done()
                        c.Logf("=== %v", trial)

                        objname := prefix + trial.path

                        buf := make([]byte, 1234)
                        rand.Read(buf)

                        err := bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), "application/octet-stream", s3.Private, s3.Options{})
                        if !c.Check(err, check.ErrorMatches, `400 Bad.*`, check.Commentf("PUT %q should fail", objname)) {
                                return
                        }

                        if objname != "" && objname != "/" {
                                _, err = bucket.GetReader(objname)
                                c.Check(err, check.ErrorMatches, `404 Not Found`, check.Commentf("GET %q should return 404", objname))
                        }
                }()
        }
        wg.Wait()
}

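// writeBigDirs adds dirs directories (dir0, dir1, ...) to the test
// collection, each containing filesPerDir empty files, and syncs the
// collection so the new content is visible through the S3 API.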
func (stage *s3stage) writeBigDirs(c *check.C, dirs int, filesPerDir int) {
        fs, err := stage.coll.FileSystem(stage.arv, stage.kc)
        c.Assert(err, check.IsNil)
        for d := 0; d < dirs; d++ {
                dir := fmt.Sprintf("dir%d", d)
                c.Assert(fs.Mkdir(dir, 0755), check.IsNil)
                for i := 0; i < filesPerDir; i++ {
                        f, err := fs.OpenFile(fmt.Sprintf("%s/file%d.txt", dir, i), os.O_CREATE|os.O_WRONLY, 0644)
                        c.Assert(err, check.IsNil)
                        c.Assert(f.Close(), check.IsNil)
                }
        }
        c.Assert(fs.Sync(), check.IsNil)
}

func (s *IntegrationSuite) TestS3CollectionList(c *check.C) {
        stage := s.s3setup(c)
        defer stage.teardown(c)

        filesPerDir := 1001
        stage.writeBigDirs(c, 2, filesPerDir)
        s.testS3List(c, stage.collbucket, "", 4000, 2+filesPerDir*2)
        s.testS3List(c, stage.collbucket, "", 131, 2+filesPerDir*2)
        s.testS3List(c, stage.collbucket, "dir0/", 71, filesPerDir)
}
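// testS3List pages through the bucket with the given max-keys value and
// checks that every expected file is listed exactly once, in no more pages
// than necessary. keep-web caps max-keys at 1000, so a larger requested page
// size still yields pages of at most 1000 keys.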
func (s *IntegrationSuite) testS3List(c *check.C, bucket *s3.Bucket, prefix string, pageSize, expectFiles int) {
        expectPageSize := pageSize
        if expectPageSize > 1000 {
                expectPageSize = 1000
        }
        gotKeys := map[string]s3.Key{}
        nextMarker := ""
        pages := 0
        for {
                resp, err := bucket.List(prefix, "", nextMarker, pageSize)
                if !c.Check(err, check.IsNil) {
                        break
                }
                c.Check(len(resp.Contents) <= expectPageSize, check.Equals, true)
                if pages++; !c.Check(pages <= (expectFiles/expectPageSize)+1, check.Equals, true) {
                        break
                }
                for _, key := range resp.Contents {
                        gotKeys[key.Key] = key
                }
                if !resp.IsTruncated {
                        c.Check(resp.NextMarker, check.Equals, "")
                        break
                }
                if !c.Check(resp.NextMarker, check.Not(check.Equals), "") {
                        break
                }
                nextMarker = resp.NextMarker
        }
        c.Check(len(gotKeys), check.Equals, expectFiles)
}

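// TestS3CollectionListRollup compares ListObjects responses (with various
// prefix/delimiter/marker combinations) against expectations computed from a
// full flat listing of the same collection.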
func (s *IntegrationSuite) TestS3CollectionListRollup(c *check.C) {
        stage := s.s3setup(c)
        defer stage.teardown(c)

        dirs := 2
        filesPerDir := 500
        stage.writeBigDirs(c, dirs, filesPerDir)
        err := stage.collbucket.PutReader("dingbats", &bytes.Buffer{}, 0, "application/octet-stream", s3.Private, s3.Options{})
        c.Assert(err, check.IsNil)
        var allfiles []string
        for marker := ""; ; {
                resp, err := stage.collbucket.List("", "", marker, 20000)
                c.Check(err, check.IsNil)
                for _, key := range resp.Contents {
                        if len(allfiles) == 0 || allfiles[len(allfiles)-1] != key.Key {
                                allfiles = append(allfiles, key.Key)
                        }
                }
                marker = resp.NextMarker
                if marker == "" {
                        break
                }
        }
        c.Check(allfiles, check.HasLen, dirs*filesPerDir+3)

        for _, trial := range []struct {
                prefix    string
                delimiter string
                marker    string
        }{
                {"di", "/", ""},
                {"di", "r", ""},
                {"di", "n", ""},
                {"dir0", "/", ""},
                {"dir0", "/", "dir0/file14.txt"},       // no commonprefixes
                {"", "", "dir0/file14.txt"},            // middle page, skip walking dir1
                {"", "", "dir1/file14.txt"},            // middle page, skip walking dir0
                {"", "", "dir1/file498.txt"},           // last page of results
                {"dir1/file", "", "dir1/file498.txt"},  // last page of results, with prefix
                {"dir1/file", "/", "dir1/file498.txt"}, // last page of results, with prefix + delimiter
                {"dir1", "Z", "dir1/file498.txt"},      // delimiter "Z" never appears
                {"dir2", "/", ""},                      // prefix "dir2" does not exist
                {"", "/", ""},
        } {
                c.Logf("\n\n=== trial %+v", trial)

                maxKeys := 20
                resp, err := stage.collbucket.List(trial.prefix, trial.delimiter, trial.marker, maxKeys)
                c.Check(err, check.IsNil)
                if resp.IsTruncated && trial.delimiter == "" {
                        // goamz List method fills in the missing
                        // NextMarker field if resp.IsTruncated, so
                        // now we can't really tell whether it was
                        // sent by the server or by goamz. In cases
                        // where it should be empty but isn't, assume
                        // it's goamz's fault.
                        resp.NextMarker = ""
                }

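                // Compute the expected keys, common prefixes, and truncation
                // state by replaying S3 prefix/delimiter semantics over the
                // flat listing in allfiles.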
                var expectKeys []string
                var expectPrefixes []string
                var expectNextMarker string
                var expectTruncated bool
                for _, key := range allfiles {
                        full := len(expectKeys)+len(expectPrefixes) >= maxKeys
                        if !strings.HasPrefix(key, trial.prefix) || key < trial.marker {
                                continue
                        } else if idx := strings.Index(key[len(trial.prefix):], trial.delimiter); trial.delimiter != "" && idx >= 0 {
                                prefix := key[:len(trial.prefix)+idx+1]
                                if len(expectPrefixes) > 0 && expectPrefixes[len(expectPrefixes)-1] == prefix {
                                        // same prefix as previous key
                                } else if full {
                                        expectNextMarker = key
                                        expectTruncated = true
                                } else {
                                        expectPrefixes = append(expectPrefixes, prefix)
                                }
                        } else if full {
                                if trial.delimiter != "" {
                                        expectNextMarker = key
                                }
                                expectTruncated = true
                                break
                        } else {
                                expectKeys = append(expectKeys, key)
                        }
                }

                var gotKeys []string
                for _, key := range resp.Contents {
                        gotKeys = append(gotKeys, key.Key)
                }
                var gotPrefixes []string
                for _, prefix := range resp.CommonPrefixes {
                        gotPrefixes = append(gotPrefixes, prefix)
                }
                c.Check(gotKeys, check.DeepEquals, expectKeys)
                c.Check(gotPrefixes, check.DeepEquals, expectPrefixes)
                c.Check(resp.NextMarker, check.Equals, expectNextMarker)
                c.Check(resp.IsTruncated, check.Equals, expectTruncated)
                c.Logf("=== trial %+v keys %q prefixes %q nextMarker %q", trial, gotKeys, gotPrefixes, resp.NextMarker)
        }
}