16625: Merge branch 'master' into 16625-add-azure-managed-image-support
[arvados.git] / services / keep-web / s3_test.go
1 // Copyright (C) The Arvados Authors. All rights reserved.
2 //
3 // SPDX-License-Identifier: AGPL-3.0
4
5 package main
6
7 import (
8         "bytes"
9         "crypto/rand"
10         "fmt"
11         "io/ioutil"
12         "net/http"
13         "os"
14         "strings"
15         "sync"
16         "time"
17
18         "git.arvados.org/arvados.git/sdk/go/arvados"
19         "git.arvados.org/arvados.git/sdk/go/arvadosclient"
20         "git.arvados.org/arvados.git/sdk/go/arvadostest"
21         "git.arvados.org/arvados.git/sdk/go/keepclient"
22         "github.com/AdRoll/goamz/aws"
23         "github.com/AdRoll/goamz/s3"
24         check "gopkg.in/check.v1"
25 )
26
// s3stage holds the server-side fixtures (a temporary project
// containing a small test collection) and the S3 client/bucket
// handles used by the keep-web S3 API tests. Create one with
// s3setup() and release the server-side objects with teardown().
type s3stage struct {
	arv        *arvados.Client
	ac         *arvadosclient.ArvadosClient
	kc         *keepclient.KeepClient
	proj       arvados.Group      // temporary project created by s3setup
	projbucket *s3.Bucket         // S3 bucket addressing proj by UUID
	coll       arvados.Collection // test collection inside proj
	collbucket *s3.Bucket         // S3 bucket addressing coll by UUID
}
36
// s3setup creates server-side test fixtures: a new project, and a
// collection inside it containing an empty file ("emptyfile"), an
// empty directory ("emptydir"), and a small UTF-8 file
// ("sailboat.txt"). It returns an s3stage with S3 bucket handles for
// both the project and the collection, authenticated with the active
// user's token.
func (s *IntegrationSuite) s3setup(c *check.C) s3stage {
	var proj arvados.Group
	var coll arvados.Collection
	arv := arvados.NewClientFromEnv()
	arv.AuthToken = arvadostest.ActiveToken
	err := arv.RequestAndDecode(&proj, "POST", "arvados/v1/groups", nil, map[string]interface{}{
		"group": map[string]interface{}{
			"group_class": "project",
			"name":        "keep-web s3 test",
		},
		"ensure_unique_name": true,
	})
	c.Assert(err, check.IsNil)
	// The manifest creates "emptyfile" and the empty directory "emptydir".
	err = arv.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{"collection": map[string]interface{}{
		"owner_uuid":    proj.UUID,
		"name":          "keep-web s3 test collection",
		"manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:emptyfile\n./emptydir d41d8cd98f00b204e9800998ecf8427e+0 0:0:.\n",
	}})
	c.Assert(err, check.IsNil)
	ac, err := arvadosclient.New(arv)
	c.Assert(err, check.IsNil)
	kc, err := keepclient.MakeKeepClient(ac)
	c.Assert(err, check.IsNil)
	// Add sailboat.txt (4 bytes: a 3-byte UTF-8 rune plus newline)
	// through the collection filesystem, then sync it to the server.
	fs, err := coll.FileSystem(arv, kc)
	c.Assert(err, check.IsNil)
	f, err := fs.OpenFile("sailboat.txt", os.O_CREATE|os.O_WRONLY, 0644)
	c.Assert(err, check.IsNil)
	_, err = f.Write([]byte("⛵\n"))
	c.Assert(err, check.IsNil)
	err = f.Close()
	c.Assert(err, check.IsNil)
	err = fs.Sync()
	c.Assert(err, check.IsNil)
	// Re-fetch the collection record so coll reflects the synced contents.
	err = arv.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+coll.UUID, nil, nil)
	c.Assert(err, check.IsNil)

	// S3 credentials: the Arvados v2 token is supplied as both the
	// access key and the secret key.
	auth := aws.NewAuth(arvadostest.ActiveTokenV2, arvadostest.ActiveTokenV2, "", time.Now().Add(time.Hour))
	region := aws.Region{
		Name:       s.testServer.Addr,
		S3Endpoint: "http://" + s.testServer.Addr,
	}
	client := s3.New(*auth, region)
	return s3stage{
		arv:  arv,
		ac:   ac,
		kc:   kc,
		proj: proj,
		projbucket: &s3.Bucket{
			S3:   client,
			Name: proj.UUID,
		},
		coll: coll,
		collbucket: &s3.Bucket{
			S3:   client,
			Name: coll.UUID,
		},
	}
}
95
96 func (stage s3stage) teardown(c *check.C) {
97         if stage.coll.UUID != "" {
98                 err := stage.arv.RequestAndDecode(&stage.coll, "DELETE", "arvados/v1/collections/"+stage.coll.UUID, nil, nil)
99                 c.Check(err, check.IsNil)
100         }
101         if stage.proj.UUID != "" {
102                 err := stage.arv.RequestAndDecode(&stage.proj, "DELETE", "arvados/v1/groups/"+stage.proj.UUID, nil, nil)
103                 c.Check(err, check.IsNil)
104         }
105 }
106
107 func (s *IntegrationSuite) TestS3HeadBucket(c *check.C) {
108         stage := s.s3setup(c)
109         defer stage.teardown(c)
110
111         for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
112                 c.Logf("bucket %s", bucket.Name)
113                 exists, err := bucket.Exists("")
114                 c.Check(err, check.IsNil)
115                 c.Check(exists, check.Equals, true)
116         }
117 }
118
// TestS3CollectionGetObject runs the GetObject/HeadObject tests
// against a collection bucket (object keys at top level).
func (s *IntegrationSuite) TestS3CollectionGetObject(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	s.testS3GetObject(c, stage.collbucket, "")
}
// TestS3ProjectGetObject runs the GetObject/HeadObject tests against
// a project bucket, where keys are prefixed with the collection name.
func (s *IntegrationSuite) TestS3ProjectGetObject(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	s.testS3GetObject(c, stage.projbucket, stage.coll.Name+"/")
}
129 func (s *IntegrationSuite) testS3GetObject(c *check.C, bucket *s3.Bucket, prefix string) {
130         rdr, err := bucket.GetReader(prefix + "emptyfile")
131         c.Assert(err, check.IsNil)
132         buf, err := ioutil.ReadAll(rdr)
133         c.Check(err, check.IsNil)
134         c.Check(len(buf), check.Equals, 0)
135         err = rdr.Close()
136         c.Check(err, check.IsNil)
137
138         // GetObject
139         rdr, err = bucket.GetReader(prefix + "missingfile")
140         c.Check(err, check.ErrorMatches, `404 Not Found`)
141
142         // HeadObject
143         exists, err := bucket.Exists(prefix + "missingfile")
144         c.Check(err, check.IsNil)
145         c.Check(exists, check.Equals, false)
146
147         // GetObject
148         rdr, err = bucket.GetReader(prefix + "sailboat.txt")
149         c.Assert(err, check.IsNil)
150         buf, err = ioutil.ReadAll(rdr)
151         c.Check(err, check.IsNil)
152         c.Check(buf, check.DeepEquals, []byte("⛵\n"))
153         err = rdr.Close()
154         c.Check(err, check.IsNil)
155
156         // HeadObject
157         exists, err = bucket.Exists(prefix + "sailboat.txt")
158         c.Check(err, check.IsNil)
159         c.Check(exists, check.Equals, true)
160 }
161
// TestS3CollectionPutObjectSuccess runs the successful-PUT tests
// against a collection bucket (object keys at top level).
func (s *IntegrationSuite) TestS3CollectionPutObjectSuccess(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	s.testS3PutObjectSuccess(c, stage.collbucket, "")
}
// TestS3ProjectPutObjectSuccess runs the successful-PUT tests against
// a project bucket, where keys are prefixed with the collection name.
func (s *IntegrationSuite) TestS3ProjectPutObjectSuccess(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	s.testS3PutObjectSuccess(c, stage.projbucket, stage.coll.Name+"/")
}
172 func (s *IntegrationSuite) testS3PutObjectSuccess(c *check.C, bucket *s3.Bucket, prefix string) {
173         for _, trial := range []struct {
174                 path        string
175                 size        int
176                 contentType string
177         }{
178                 {
179                         path:        "newfile",
180                         size:        128000000,
181                         contentType: "application/octet-stream",
182                 }, {
183                         path:        "newdir/newfile",
184                         size:        1 << 26,
185                         contentType: "application/octet-stream",
186                 }, {
187                         path:        "newdir1/newdir2/newfile",
188                         size:        0,
189                         contentType: "application/octet-stream",
190                 }, {
191                         path:        "newdir1/newdir2/newdir3/",
192                         size:        0,
193                         contentType: "application/x-directory",
194                 },
195         } {
196                 c.Logf("=== %v", trial)
197
198                 objname := prefix + trial.path
199
200                 _, err := bucket.GetReader(objname)
201                 c.Assert(err, check.ErrorMatches, `404 Not Found`)
202
203                 buf := make([]byte, trial.size)
204                 rand.Read(buf)
205
206                 err = bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), trial.contentType, s3.Private, s3.Options{})
207                 c.Check(err, check.IsNil)
208
209                 rdr, err := bucket.GetReader(objname)
210                 if strings.HasSuffix(trial.path, "/") && !s.testServer.Config.cluster.Collections.S3FolderObjects {
211                         c.Check(err, check.NotNil)
212                         continue
213                 } else if !c.Check(err, check.IsNil) {
214                         continue
215                 }
216                 buf2, err := ioutil.ReadAll(rdr)
217                 c.Check(err, check.IsNil)
218                 c.Check(buf2, check.HasLen, len(buf))
219                 c.Check(bytes.Equal(buf, buf2), check.Equals, true)
220         }
221 }
222
223 func (s *IntegrationSuite) TestS3ProjectPutObjectNotSupported(c *check.C) {
224         stage := s.s3setup(c)
225         defer stage.teardown(c)
226         bucket := stage.projbucket
227
228         for _, trial := range []struct {
229                 path        string
230                 size        int
231                 contentType string
232         }{
233                 {
234                         path:        "newfile",
235                         size:        1234,
236                         contentType: "application/octet-stream",
237                 }, {
238                         path:        "newdir/newfile",
239                         size:        1234,
240                         contentType: "application/octet-stream",
241                 }, {
242                         path:        "newdir2/",
243                         size:        0,
244                         contentType: "application/x-directory",
245                 },
246         } {
247                 c.Logf("=== %v", trial)
248
249                 _, err := bucket.GetReader(trial.path)
250                 c.Assert(err, check.ErrorMatches, `404 Not Found`)
251
252                 buf := make([]byte, trial.size)
253                 rand.Read(buf)
254
255                 err = bucket.PutReader(trial.path, bytes.NewReader(buf), int64(len(buf)), trial.contentType, s3.Private, s3.Options{})
256                 c.Check(err, check.ErrorMatches, `400 Bad Request`)
257
258                 _, err = bucket.GetReader(trial.path)
259                 c.Assert(err, check.ErrorMatches, `404 Not Found`)
260         }
261 }
262
// TestS3CollectionPutObjectFailure runs the invalid-PUT tests against
// a collection bucket (object keys at top level).
func (s *IntegrationSuite) TestS3CollectionPutObjectFailure(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	s.testS3PutObjectFailure(c, stage.collbucket, "")
}
// TestS3ProjectPutObjectFailure runs the invalid-PUT tests against a
// project bucket, where keys are prefixed with the collection name.
func (s *IntegrationSuite) TestS3ProjectPutObjectFailure(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	s.testS3PutObjectFailure(c, stage.projbucket, stage.coll.Name+"/")
}
// testS3PutObjectFailure sends (concurrently) PUT requests with
// object names that should be rejected -- names that collide with
// existing files, names with empty path components, and names ending
// in "/" while folder objects are disabled -- and verifies that each
// gets a 400 response and creates no object.
func (s *IntegrationSuite) testS3PutObjectFailure(c *check.C, bucket *s3.Bucket, prefix string) {
	// Disable folder objects so trailing-slash names are also
	// expected to fail.
	s.testServer.Config.cluster.Collections.S3FolderObjects = false
	var wg sync.WaitGroup
	for _, trial := range []struct {
		path string
	}{
		{
			path: "emptyfile/newname", // emptyfile exists, see s3setup()
		}, {
			path: "emptyfile/", // emptyfile exists, see s3setup()
		}, {
			path: "emptydir", // dir already exists, see s3setup()
		}, {
			path: "emptydir/",
		}, {
			path: "emptydir//",
		}, {
			path: "newdir/",
		}, {
			path: "newdir//",
		}, {
			path: "/",
		}, {
			path: "//",
		}, {
			path: "foo//bar",
		}, {
			path: "",
		},
	} {
		trial := trial // capture the loop variable for the goroutine below
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.Logf("=== %v", trial)

			objname := prefix + trial.path

			buf := make([]byte, 1234)
			rand.Read(buf)

			err := bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), "application/octet-stream", s3.Private, s3.Options{})
			if !c.Check(err, check.ErrorMatches, `400 Bad.*`, check.Commentf("PUT %q should fail", objname)) {
				return
			}

			// A failed PUT must not leave an object behind. ("" and
			// "/" are not valid GET targets, so skip those.)
			if objname != "" && objname != "/" {
				_, err = bucket.GetReader(objname)
				c.Check(err, check.ErrorMatches, `404 Not Found`, check.Commentf("GET %q should return 404", objname))
			}
		}()
	}
	wg.Wait()
}
327
328 func (stage *s3stage) writeBigDirs(c *check.C, dirs int, filesPerDir int) {
329         fs, err := stage.coll.FileSystem(stage.arv, stage.kc)
330         c.Assert(err, check.IsNil)
331         for d := 0; d < dirs; d++ {
332                 dir := fmt.Sprintf("dir%d", d)
333                 c.Assert(fs.Mkdir(dir, 0755), check.IsNil)
334                 for i := 0; i < filesPerDir; i++ {
335                         f, err := fs.OpenFile(fmt.Sprintf("%s/file%d.txt", dir, i), os.O_CREATE|os.O_WRONLY, 0644)
336                         c.Assert(err, check.IsNil)
337                         c.Assert(f.Close(), check.IsNil)
338                 }
339         }
340         c.Assert(fs.Sync(), check.IsNil)
341 }
342
343 func (s *IntegrationSuite) TestS3GetBucketVersioning(c *check.C) {
344         stage := s.s3setup(c)
345         defer stage.teardown(c)
346         for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
347                 req, err := http.NewRequest("GET", bucket.URL("/"), nil)
348                 req.Header.Set("Authorization", "AWS "+arvadostest.ActiveTokenV2+":none")
349                 req.URL.RawQuery = "versioning"
350                 resp, err := http.DefaultClient.Do(req)
351                 c.Assert(err, check.IsNil)
352                 c.Check(resp.Header.Get("Content-Type"), check.Equals, "application/xml")
353                 buf, err := ioutil.ReadAll(resp.Body)
354                 c.Assert(err, check.IsNil)
355                 c.Check(string(buf), check.Equals, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"/>\n")
356         }
357 }
358
// TestS3CollectionList verifies bucket listing item counts for a
// large collection, with S3FolderObjects disabled and then enabled.
func (s *IntegrationSuite) TestS3CollectionList(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)

	var markers int
	// Range-with-assignment idiom: markers gets the slice index (0,
	// then 1), which conveniently equals the number of fake
	// "directory" objects expected per directory under the
	// S3FolderObjects setting (false, then true) being assigned at
	// the same time.
	for markers, s.testServer.Config.cluster.Collections.S3FolderObjects = range []bool{false, true} {
		dirs := 2
		filesPerDir := 1001
		stage.writeBigDirs(c, dirs, filesPerDir)
		// Total # objects is:
		//                 2 file entries from s3setup (emptyfile and sailboat.txt)
		//                +1 fake "directory" marker from s3setup (emptydir) (if enabled)
		//             +dirs fake "directory" marker from writeBigDirs (dir0/, dir1/) (if enabled)
		// +filesPerDir*dirs file entries from writeBigDirs (dir0/file0.txt, etc.)
		s.testS3List(c, stage.collbucket, "", 4000, markers+2+(filesPerDir+markers)*dirs)
		s.testS3List(c, stage.collbucket, "", 131, markers+2+(filesPerDir+markers)*dirs)
		s.testS3List(c, stage.collbucket, "dir0/", 71, filesPerDir+markers)
	}
}
// testS3List pages through a bucket listing under the given prefix
// using the given page size and checks that exactly expectFiles
// distinct keys are returned, with consistent pagination markers.
func (s *IntegrationSuite) testS3List(c *check.C, bucket *s3.Bucket, prefix string, pageSize, expectFiles int) {
	c.Logf("testS3List: prefix=%q pageSize=%d S3FolderObjects=%v", prefix, pageSize, s.testServer.Config.cluster.Collections.S3FolderObjects)
	// Pages are capped at 1000 keys regardless of the requested size.
	expectPageSize := pageSize
	if expectPageSize > 1000 {
		expectPageSize = 1000
	}
	gotKeys := map[string]s3.Key{}
	nextMarker := ""
	pages := 0
	for {
		resp, err := bucket.List(prefix, "", nextMarker, pageSize)
		if !c.Check(err, check.IsNil) {
			break
		}
		c.Check(len(resp.Contents) <= expectPageSize, check.Equals, true)
		// Bail out rather than loop forever if pagination never
		// terminates (more pages than the key count can justify).
		if pages++; !c.Check(pages <= (expectFiles/expectPageSize)+1, check.Equals, true) {
			break
		}
		for _, key := range resp.Contents {
			gotKeys[key.Key] = key
			if strings.Contains(key.Key, "sailboat.txt") {
				// sailboat.txt is 4 bytes, see s3setup().
				c.Check(key.Size, check.Equals, int64(4))
			}
		}
		if !resp.IsTruncated {
			c.Check(resp.NextMarker, check.Equals, "")
			break
		}
		if !c.Check(resp.NextMarker, check.Not(check.Equals), "") {
			break
		}
		nextMarker = resp.NextMarker
	}
	c.Check(len(gotKeys), check.Equals, expectFiles)
}
413
// TestS3CollectionListRollup runs the prefix/delimiter rollup listing
// tests with S3FolderObjects disabled, then enabled.
func (s *IntegrationSuite) TestS3CollectionListRollup(c *check.C) {
	// Range-with-assignment: the config flag takes each value in turn.
	for _, s.testServer.Config.cluster.Collections.S3FolderObjects = range []bool{false, true} {
		s.testS3CollectionListRollup(c)
	}
}
419
// testS3CollectionListRollup builds a collection with a known set of
// files, fetches the complete flat key list, then checks List()
// results for many prefix/delimiter/marker combinations against
// expectations computed locally from that flat list.
func (s *IntegrationSuite) testS3CollectionListRollup(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)

	dirs := 2
	filesPerDir := 500
	stage.writeBigDirs(c, dirs, filesPerDir)
	// Add one more top-level object via the S3 API itself.
	err := stage.collbucket.PutReader("dingbats", &bytes.Buffer{}, 0, "application/octet-stream", s3.Private, s3.Options{})
	c.Assert(err, check.IsNil)
	// Collect the full (paginated, delimiter-less) key list to use
	// as the reference for the trials below.
	var allfiles []string
	for marker := ""; ; {
		resp, err := stage.collbucket.List("", "", marker, 20000)
		c.Check(err, check.IsNil)
		for _, key := range resp.Contents {
			if len(allfiles) == 0 || allfiles[len(allfiles)-1] != key.Key {
				allfiles = append(allfiles, key.Key)
			}
		}
		marker = resp.NextMarker
		if marker == "" {
			break
		}
	}
	// markers = expected number of fake "directory" objects per
	// directory (1 only when S3FolderObjects is enabled).
	markers := 0
	if s.testServer.Config.cluster.Collections.S3FolderObjects {
		markers = 1
	}
	c.Check(allfiles, check.HasLen, dirs*(filesPerDir+markers)+3+markers)

	// When enabled, each directory marker must appear in the listing
	// before any of the files under that directory.
	gotDirMarker := map[string]bool{}
	for _, name := range allfiles {
		isDirMarker := strings.HasSuffix(name, "/")
		if markers == 0 {
			c.Check(isDirMarker, check.Equals, false, check.Commentf("name %q", name))
		} else if isDirMarker {
			gotDirMarker[name] = true
		} else if i := strings.LastIndex(name, "/"); i >= 0 {
			c.Check(gotDirMarker[name[:i+1]], check.Equals, true, check.Commentf("name %q", name))
			gotDirMarker[name[:i+1]] = true // skip redundant complaints about this dir marker
		}
	}

	for _, trial := range []struct {
		prefix    string
		delimiter string
		marker    string
	}{
		{"", "", ""},
		{"di", "/", ""},
		{"di", "r", ""},
		{"di", "n", ""},
		{"dir0", "/", ""},
		{"dir0/", "/", ""},
		{"dir0/f", "/", ""},
		{"dir0", "", ""},
		{"dir0/", "", ""},
		{"dir0/f", "", ""},
		{"dir0", "/", "dir0/file14.txt"},       // no commonprefixes
		{"", "", "dir0/file14.txt"},            // middle page, skip walking dir1
		{"", "", "dir1/file14.txt"},            // middle page, skip walking dir0
		{"", "", "dir1/file498.txt"},           // last page of results
		{"dir1/file", "", "dir1/file498.txt"},  // last page of results, with prefix
		{"dir1/file", "/", "dir1/file498.txt"}, // last page of results, with prefix + delimiter
		{"dir1", "Z", "dir1/file498.txt"},      // delimiter "Z" never appears
		{"dir2", "/", ""},                      // prefix "dir2" does not exist
		{"", "/", ""},
	} {
		c.Logf("\n\n=== trial %+v markers=%d", trial, markers)

		maxKeys := 20
		resp, err := stage.collbucket.List(trial.prefix, trial.delimiter, trial.marker, maxKeys)
		c.Check(err, check.IsNil)
		if resp.IsTruncated && trial.delimiter == "" {
			// goamz List method fills in the missing
			// NextMarker field if resp.IsTruncated, so
			// now we can't really tell whether it was
			// sent by the server or by goamz. In cases
			// where it should be empty but isn't, assume
			// it's goamz's fault.
			resp.NextMarker = ""
		}

		// Compute the expected keys, common prefixes, and
		// pagination state for this trial from allfiles.
		var expectKeys []string
		var expectPrefixes []string
		var expectNextMarker string
		var expectTruncated bool
		for _, key := range allfiles {
			full := len(expectKeys)+len(expectPrefixes) >= maxKeys
			if !strings.HasPrefix(key, trial.prefix) || key < trial.marker {
				continue
			} else if idx := strings.Index(key[len(trial.prefix):], trial.delimiter); trial.delimiter != "" && idx >= 0 {
				// Key contains the delimiter after the prefix:
				// it rolls up into a common prefix.
				prefix := key[:len(trial.prefix)+idx+1]
				if len(expectPrefixes) > 0 && expectPrefixes[len(expectPrefixes)-1] == prefix {
					// same prefix as previous key
				} else if full {
					expectNextMarker = key
					expectTruncated = true
				} else {
					expectPrefixes = append(expectPrefixes, prefix)
				}
			} else if full {
				if trial.delimiter != "" {
					expectNextMarker = key
				}
				expectTruncated = true
				break
			} else {
				expectKeys = append(expectKeys, key)
			}
		}

		var gotKeys []string
		for _, key := range resp.Contents {
			gotKeys = append(gotKeys, key.Key)
		}
		var gotPrefixes []string
		for _, prefix := range resp.CommonPrefixes {
			gotPrefixes = append(gotPrefixes, prefix)
		}
		commentf := check.Commentf("trial %+v markers=%d", trial, markers)
		c.Check(gotKeys, check.DeepEquals, expectKeys, commentf)
		c.Check(gotPrefixes, check.DeepEquals, expectPrefixes, commentf)
		c.Check(resp.NextMarker, check.Equals, expectNextMarker, commentf)
		c.Check(resp.IsTruncated, check.Equals, expectTruncated, commentf)
		c.Logf("=== trial %+v keys %q prefixes %q nextMarker %q", trial, gotKeys, gotPrefixes, resp.NextMarker)
	}
}