// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"strings"
	"sync"
	"time"

	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/arvadosclient"
	"git.arvados.org/arvados.git/sdk/go/arvadostest"
	"git.arvados.org/arvados.git/sdk/go/keepclient"
	"github.com/AdRoll/goamz/aws"
	"github.com/AdRoll/goamz/s3"
	check "gopkg.in/check.v1"
)

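// s3stage holds the API clients and the test fixtures: a project and a
// collection inside it, each also addressed as an S3 bucket by the tests
// below.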
type s3stage struct {
	arv        *arvados.Client
	ac         *arvadosclient.ArvadosClient
	kc         *keepclient.KeepClient
	proj       arvados.Group
	projbucket *s3.Bucket
	coll       arvados.Collection
	collbucket *s3.Bucket
}

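// s3setup creates a test project and a collection inside it (containing
// emptyfile, emptydir, and sailboat.txt), then returns an s3stage whose
// goamz buckets point at the keep-web test server and authenticate with
// the active user's token.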
func (s *IntegrationSuite) s3setup(c *check.C) s3stage {
	var proj arvados.Group
	var coll arvados.Collection
	arv := arvados.NewClientFromEnv()
	arv.AuthToken = arvadostest.ActiveToken
	err := arv.RequestAndDecode(&proj, "POST", "arvados/v1/groups", nil, map[string]interface{}{
		"group": map[string]interface{}{
			"group_class": "project",
			"name":        "keep-web s3 test",
		},
		"ensure_unique_name": true,
	})
	c.Assert(err, check.IsNil)
	err = arv.RequestAndDecode(&coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{"collection": map[string]interface{}{
		"owner_uuid":    proj.UUID,
		"name":          "keep-web s3 test collection",
		"manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:emptyfile\n./emptydir d41d8cd98f00b204e9800998ecf8427e+0 0:0:.\n",
	}})
	c.Assert(err, check.IsNil)
	ac, err := arvadosclient.New(arv)
	c.Assert(err, check.IsNil)
	kc, err := keepclient.MakeKeepClient(ac)
	c.Assert(err, check.IsNil)
	fs, err := coll.FileSystem(arv, kc)
	c.Assert(err, check.IsNil)
	f, err := fs.OpenFile("sailboat.txt", os.O_CREATE|os.O_WRONLY, 0644)
	c.Assert(err, check.IsNil)
	_, err = f.Write([]byte("⛵\n"))
	c.Assert(err, check.IsNil)
	err = f.Close()
	c.Assert(err, check.IsNil)
	err = fs.Sync()
	c.Assert(err, check.IsNil)
	err = arv.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+coll.UUID, nil, nil)
	c.Assert(err, check.IsNil)

	auth := aws.NewAuth(arvadostest.ActiveTokenV2, arvadostest.ActiveTokenV2, "", time.Now().Add(time.Hour))
	region := aws.Region{
		Name:       s.testServer.Addr,
		S3Endpoint: "http://" + s.testServer.Addr,
	}
	client := s3.New(*auth, region)
	return s3stage{
		arv:  arv,
		ac:   ac,
		kc:   kc,
		proj: proj,
		projbucket: &s3.Bucket{
			S3:   client,
			Name: proj.UUID,
		},
		coll: coll,
		collbucket: &s3.Bucket{
			S3:   client,
			Name: coll.UUID,
		},
	}
}

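// teardown deletes the collection and project created by s3setup.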
func (stage s3stage) teardown(c *check.C) {
	if stage.coll.UUID != "" {
		err := stage.arv.RequestAndDecode(&stage.coll, "DELETE", "arvados/v1/collections/"+stage.coll.UUID, nil, nil)
		c.Check(err, check.IsNil)
	}
	if stage.proj.UUID != "" {
		err := stage.arv.RequestAndDecode(&stage.proj, "DELETE", "arvados/v1/groups/"+stage.proj.UUID, nil, nil)
		c.Check(err, check.IsNil)
	}
}

func (s *IntegrationSuite) TestS3HeadBucket(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)

	for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
		c.Logf("bucket %s", bucket.Name)
		exists, err := bucket.Exists("")
		c.Check(err, check.IsNil)
		c.Check(exists, check.Equals, true)
	}
}

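// The GetObject tests fetch existing keys via GET and HEAD, and expect
// 404 for a missing key, on both the collection bucket and the project
// bucket (where objects appear under the collection-name prefix).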
func (s *IntegrationSuite) TestS3CollectionGetObject(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	s.testS3GetObject(c, stage.collbucket, "")
}
func (s *IntegrationSuite) TestS3ProjectGetObject(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	s.testS3GetObject(c, stage.projbucket, stage.coll.Name+"/")
}
func (s *IntegrationSuite) testS3GetObject(c *check.C, bucket *s3.Bucket, prefix string) {
	rdr, err := bucket.GetReader(prefix + "emptyfile")
	c.Assert(err, check.IsNil)
	buf, err := ioutil.ReadAll(rdr)
	c.Check(err, check.IsNil)
	c.Check(len(buf), check.Equals, 0)
	err = rdr.Close()
	c.Check(err, check.IsNil)

	// GetObject
	rdr, err = bucket.GetReader(prefix + "missingfile")
	c.Check(err, check.ErrorMatches, `404 Not Found`)

	// HeadObject
	exists, err := bucket.Exists(prefix + "missingfile")
	c.Check(err, check.IsNil)
	c.Check(exists, check.Equals, false)

	// GetObject
	rdr, err = bucket.GetReader(prefix + "sailboat.txt")
	c.Assert(err, check.IsNil)
	buf, err = ioutil.ReadAll(rdr)
	c.Check(err, check.IsNil)
	c.Check(buf, check.DeepEquals, []byte("⛵\n"))
	err = rdr.Close()
	c.Check(err, check.IsNil)

	// HeadObject
	resp, err := bucket.Head(prefix+"sailboat.txt", nil)
	c.Check(err, check.IsNil)
	c.Check(resp.StatusCode, check.Equals, http.StatusOK)
	c.Check(resp.ContentLength, check.Equals, int64(4))
}

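// The PutObjectSuccess tests upload objects of various sizes (including
// zero-length files and folder placeholder objects ending in "/") and
// read them back; reading back a folder object is only expected to
// succeed when S3FolderObjects is enabled.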
func (s *IntegrationSuite) TestS3CollectionPutObjectSuccess(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	s.testS3PutObjectSuccess(c, stage.collbucket, "")
}
func (s *IntegrationSuite) TestS3ProjectPutObjectSuccess(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	s.testS3PutObjectSuccess(c, stage.projbucket, stage.coll.Name+"/")
}
func (s *IntegrationSuite) testS3PutObjectSuccess(c *check.C, bucket *s3.Bucket, prefix string) {
	for _, trial := range []struct {
		path        string
		size        int
		contentType string
	}{
		{
			path:        "newfile",
			size:        128000000,
			contentType: "application/octet-stream",
		}, {
			path:        "newdir/newfile",
			size:        1 << 26,
			contentType: "application/octet-stream",
		}, {
			path:        "newdir1/newdir2/newfile",
			size:        0,
			contentType: "application/octet-stream",
		}, {
			path:        "newdir1/newdir2/newdir3/",
			size:        0,
			contentType: "application/x-directory",
		},
	} {
		c.Logf("=== %v", trial)

		objname := prefix + trial.path

		_, err := bucket.GetReader(objname)
		c.Assert(err, check.ErrorMatches, `404 Not Found`)

		buf := make([]byte, trial.size)
		rand.Read(buf)

		err = bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), trial.contentType, s3.Private, s3.Options{})
		c.Check(err, check.IsNil)

		rdr, err := bucket.GetReader(objname)
		if strings.HasSuffix(trial.path, "/") && !s.testServer.Config.cluster.Collections.S3FolderObjects {
			c.Check(err, check.NotNil)
			continue
		} else if !c.Check(err, check.IsNil) {
			continue
		}
		buf2, err := ioutil.ReadAll(rdr)
		c.Check(err, check.IsNil)
		c.Check(buf2, check.HasLen, len(buf))
		c.Check(bytes.Equal(buf, buf2), check.Equals, true)
	}
}

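// PUT requests that target a project bucket outside an existing
// collection (i.e., not under a collection-name prefix) are expected to
// fail with 400 Bad Request and leave no object behind.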
func (s *IntegrationSuite) TestS3ProjectPutObjectNotSupported(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	bucket := stage.projbucket

	for _, trial := range []struct {
		path        string
		size        int
		contentType string
	}{
		{
			path:        "newfile",
			size:        1234,
			contentType: "application/octet-stream",
		}, {
			path:        "newdir/newfile",
			size:        1234,
			contentType: "application/octet-stream",
		}, {
			path:        "newdir2/",
			size:        0,
			contentType: "application/x-directory",
		},
	} {
		c.Logf("=== %v", trial)

		_, err := bucket.GetReader(trial.path)
		c.Assert(err, check.ErrorMatches, `404 Not Found`)

		buf := make([]byte, trial.size)
		rand.Read(buf)

		err = bucket.PutReader(trial.path, bytes.NewReader(buf), int64(len(buf)), trial.contentType, s3.Private, s3.Options{})
		c.Check(err, check.ErrorMatches, `400 Bad Request`)

		_, err = bucket.GetReader(trial.path)
		c.Assert(err, check.ErrorMatches, `404 Not Found`)
	}
}

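// The DeleteObject tests delete files, nonexistent keys, and folder
// objects, then verify the deleted keys are no longer readable; deleting
// the bucket root ("/") is expected to fail.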
func (s *IntegrationSuite) TestS3CollectionDeleteObject(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	s.testS3DeleteObject(c, stage.collbucket, "")
}
func (s *IntegrationSuite) TestS3ProjectDeleteObject(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	s.testS3DeleteObject(c, stage.projbucket, stage.coll.Name+"/")
}
func (s *IntegrationSuite) testS3DeleteObject(c *check.C, bucket *s3.Bucket, prefix string) {
	s.testServer.Config.cluster.Collections.S3FolderObjects = true
	for _, trial := range []struct {
		path string
	}{
		{"/"},
		{"nonexistentfile"},
		{"emptyfile"},
		{"sailboat.txt"},
		{"sailboat.txt/"},
		{"emptydir"},
		{"emptydir/"},
	} {
		objname := prefix + trial.path
		comment := check.Commentf("objname %q", objname)

		err := bucket.Del(objname)
		if trial.path == "/" {
			c.Check(err, check.NotNil)
			continue
		}
		c.Check(err, check.IsNil, comment)
		_, err = bucket.GetReader(objname)
		c.Check(err, check.NotNil, comment)
	}
}

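// The PutObjectFailure tests issue concurrent PUT requests with invalid
// object names (conflicts with existing files, trailing or doubled
// slashes, empty names) and expect every one to fail with 400 without
// creating an object.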
func (s *IntegrationSuite) TestS3CollectionPutObjectFailure(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	s.testS3PutObjectFailure(c, stage.collbucket, "")
}
func (s *IntegrationSuite) TestS3ProjectPutObjectFailure(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	s.testS3PutObjectFailure(c, stage.projbucket, stage.coll.Name+"/")
}
func (s *IntegrationSuite) testS3PutObjectFailure(c *check.C, bucket *s3.Bucket, prefix string) {
	s.testServer.Config.cluster.Collections.S3FolderObjects = false
	var wg sync.WaitGroup
	for _, trial := range []struct {
		path string
	}{
		{
			path: "emptyfile/newname", // emptyfile exists, see s3setup()
		}, {
			path: "emptyfile/", // emptyfile exists, see s3setup()
		}, {
			path: "emptydir", // dir already exists, see s3setup()
		}, {
			path: "emptydir/",
		}, {
			path: "emptydir//",
		}, {
			path: "newdir/",
		}, {
			path: "newdir//",
		}, {
			path: "/",
		}, {
			path: "//",
		}, {
			path: "foo//bar",
		}, {
			path: "",
		},
	} {
		trial := trial
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.Logf("=== %v", trial)

			objname := prefix + trial.path

			buf := make([]byte, 1234)
			rand.Read(buf)

			err := bucket.PutReader(objname, bytes.NewReader(buf), int64(len(buf)), "application/octet-stream", s3.Private, s3.Options{})
			if !c.Check(err, check.ErrorMatches, `400 Bad.*`, check.Commentf("PUT %q should fail", objname)) {
				return
			}

			if objname != "" && objname != "/" {
				_, err = bucket.GetReader(objname)
				c.Check(err, check.ErrorMatches, `404 Not Found`, check.Commentf("GET %q should return 404", objname))
			}
		}()
	}
	wg.Wait()
}

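// writeBigDirs adds dirs directories, each containing filesPerDir empty
// files, to the staged collection.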
func (stage *s3stage) writeBigDirs(c *check.C, dirs int, filesPerDir int) {
	fs, err := stage.coll.FileSystem(stage.arv, stage.kc)
	c.Assert(err, check.IsNil)
	for d := 0; d < dirs; d++ {
		dir := fmt.Sprintf("dir%d", d)
		c.Assert(fs.Mkdir(dir, 0755), check.IsNil)
		for i := 0; i < filesPerDir; i++ {
			f, err := fs.OpenFile(fmt.Sprintf("%s/file%d.txt", dir, i), os.O_CREATE|os.O_WRONLY, 0644)
			c.Assert(err, check.IsNil)
			c.Assert(f.Close(), check.IsNil)
		}
	}
	c.Assert(fs.Sync(), check.IsNil)
}

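// GetBucketVersioning on either bucket type is expected to return an
// empty VersioningConfiguration document with an application/xml
// content type.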
func (s *IntegrationSuite) TestS3GetBucketVersioning(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)
	for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
		req, err := http.NewRequest("GET", bucket.URL("/"), nil)
		c.Check(err, check.IsNil)
		req.Header.Set("Authorization", "AWS "+arvadostest.ActiveTokenV2+":none")
		req.URL.RawQuery = "versioning"
		resp, err := http.DefaultClient.Do(req)
		c.Assert(err, check.IsNil)
		c.Check(resp.Header.Get("Content-Type"), check.Equals, "application/xml")
		buf, err := ioutil.ReadAll(resp.Body)
		c.Assert(err, check.IsNil)
		c.Check(string(buf), check.Equals, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"/>\n")
	}
}

// If there are no CommonPrefixes entries, the CommonPrefixes XML tag
// should not appear at all.
func (s *IntegrationSuite) TestS3ListNoCommonPrefixes(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)

	req, err := http.NewRequest("GET", stage.collbucket.URL("/"), nil)
	c.Assert(err, check.IsNil)
	req.Header.Set("Authorization", "AWS "+arvadostest.ActiveTokenV2+":none")
	req.URL.RawQuery = "prefix=asdfasdfasdf&delimiter=/"
	resp, err := http.DefaultClient.Do(req)
	c.Assert(err, check.IsNil)
	buf, err := ioutil.ReadAll(resp.Body)
	c.Assert(err, check.IsNil)
	c.Check(string(buf), check.Not(check.Matches), `(?ms).*CommonPrefixes.*`)
}

// If there is no delimiter in the request, or the results are not
// truncated, the NextMarker XML tag should not appear in the response
// body.
func (s *IntegrationSuite) TestS3ListNoNextMarker(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)

	for _, query := range []string{"prefix=e&delimiter=/", ""} {
		req, err := http.NewRequest("GET", stage.collbucket.URL("/"), nil)
		c.Assert(err, check.IsNil)
		req.Header.Set("Authorization", "AWS "+arvadostest.ActiveTokenV2+":none")
		req.URL.RawQuery = query
		resp, err := http.DefaultClient.Do(req)
		c.Assert(err, check.IsNil)
		buf, err := ioutil.ReadAll(resp.Body)
		c.Assert(err, check.IsNil)
		c.Check(string(buf), check.Not(check.Matches), `(?ms).*NextMarker.*`)
	}
}

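// TestS3CollectionList writes a few thousand files and checks that
// ListObjects paginates correctly at several page sizes, with
// S3FolderObjects both disabled and enabled (markers is 0 or 1
// accordingly, counting the extra directory-marker entry per directory).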
func (s *IntegrationSuite) TestS3CollectionList(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)

	var markers int
	for markers, s.testServer.Config.cluster.Collections.S3FolderObjects = range []bool{false, true} {
		dirs := 2
		filesPerDir := 1001
		stage.writeBigDirs(c, dirs, filesPerDir)
		// Total # objects is:
		//                 2 file entries from s3setup (emptyfile and sailboat.txt)
		//                +1 fake "directory" marker from s3setup (emptydir) (if enabled)
		//             +dirs fake "directory" marker from writeBigDirs (dir0/, dir1/) (if enabled)
		// +filesPerDir*dirs file entries from writeBigDirs (dir0/file0.txt, etc.)
		s.testS3List(c, stage.collbucket, "", 4000, markers+2+(filesPerDir+markers)*dirs)
		s.testS3List(c, stage.collbucket, "", 131, markers+2+(filesPerDir+markers)*dirs)
		s.testS3List(c, stage.collbucket, "dir0/", 71, filesPerDir+markers)
	}
}
func (s *IntegrationSuite) testS3List(c *check.C, bucket *s3.Bucket, prefix string, pageSize, expectFiles int) {
	c.Logf("testS3List: prefix=%q pageSize=%d S3FolderObjects=%v", prefix, pageSize, s.testServer.Config.cluster.Collections.S3FolderObjects)
	expectPageSize := pageSize
	if expectPageSize > 1000 {
		expectPageSize = 1000
	}
	gotKeys := map[string]s3.Key{}
	nextMarker := ""
	pages := 0
	for {
		resp, err := bucket.List(prefix, "", nextMarker, pageSize)
		if !c.Check(err, check.IsNil) {
			break
		}
		c.Check(len(resp.Contents) <= expectPageSize, check.Equals, true)
		if pages++; !c.Check(pages <= (expectFiles/expectPageSize)+1, check.Equals, true) {
			break
		}
		for _, key := range resp.Contents {
			gotKeys[key.Key] = key
			if strings.Contains(key.Key, "sailboat.txt") {
				c.Check(key.Size, check.Equals, int64(4))
			}
		}
		if !resp.IsTruncated {
			c.Check(resp.NextMarker, check.Equals, "")
			break
		}
		if !c.Check(resp.NextMarker, check.Not(check.Equals), "") {
			break
		}
		nextMarker = resp.NextMarker
	}
	c.Check(len(gotKeys), check.Equals, expectFiles)
}

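// The ListRollup tests exercise prefix/delimiter/marker combinations:
// they collect the full key list once, then check that each List
// response reports exactly the keys, CommonPrefixes, NextMarker, and
// IsTruncated values predicted from that list.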
func (s *IntegrationSuite) TestS3CollectionListRollup(c *check.C) {
	for _, s.testServer.Config.cluster.Collections.S3FolderObjects = range []bool{false, true} {
		s.testS3CollectionListRollup(c)
	}
}

func (s *IntegrationSuite) testS3CollectionListRollup(c *check.C) {
	stage := s.s3setup(c)
	defer stage.teardown(c)

	dirs := 2
	filesPerDir := 500
	stage.writeBigDirs(c, dirs, filesPerDir)
	err := stage.collbucket.PutReader("dingbats", &bytes.Buffer{}, 0, "application/octet-stream", s3.Private, s3.Options{})
	c.Assert(err, check.IsNil)
	var allfiles []string
	for marker := ""; ; {
		resp, err := stage.collbucket.List("", "", marker, 20000)
		c.Check(err, check.IsNil)
		for _, key := range resp.Contents {
			if len(allfiles) == 0 || allfiles[len(allfiles)-1] != key.Key {
				allfiles = append(allfiles, key.Key)
			}
		}
		marker = resp.NextMarker
		if marker == "" {
			break
		}
	}
	markers := 0
	if s.testServer.Config.cluster.Collections.S3FolderObjects {
		markers = 1
	}
	c.Check(allfiles, check.HasLen, dirs*(filesPerDir+markers)+3+markers)

	gotDirMarker := map[string]bool{}
	for _, name := range allfiles {
		isDirMarker := strings.HasSuffix(name, "/")
		if markers == 0 {
			c.Check(isDirMarker, check.Equals, false, check.Commentf("name %q", name))
		} else if isDirMarker {
			gotDirMarker[name] = true
		} else if i := strings.LastIndex(name, "/"); i >= 0 {
			c.Check(gotDirMarker[name[:i+1]], check.Equals, true, check.Commentf("name %q", name))
			gotDirMarker[name[:i+1]] = true // skip redundant complaints about this dir marker
		}
	}

	for _, trial := range []struct {
		prefix    string
		delimiter string
		marker    string
	}{
		{"", "", ""},
		{"di", "/", ""},
		{"di", "r", ""},
		{"di", "n", ""},
		{"dir0", "/", ""},
		{"dir0/", "/", ""},
		{"dir0/f", "/", ""},
		{"dir0", "", ""},
		{"dir0/", "", ""},
		{"dir0/f", "", ""},
		{"dir0", "/", "dir0/file14.txt"},       // no commonprefixes
		{"", "", "dir0/file14.txt"},            // middle page, skip walking dir1
		{"", "", "dir1/file14.txt"},            // middle page, skip walking dir0
		{"", "", "dir1/file498.txt"},           // last page of results
		{"dir1/file", "", "dir1/file498.txt"},  // last page of results, with prefix
		{"dir1/file", "/", "dir1/file498.txt"}, // last page of results, with prefix + delimiter
		{"dir1", "Z", "dir1/file498.txt"},      // delimiter "Z" never appears
		{"dir2", "/", ""},                      // prefix "dir2" does not exist
		{"", "/", ""},
	} {
		c.Logf("\n\n=== trial %+v markers=%d", trial, markers)

		maxKeys := 20
		resp, err := stage.collbucket.List(trial.prefix, trial.delimiter, trial.marker, maxKeys)
		c.Check(err, check.IsNil)
		if resp.IsTruncated && trial.delimiter == "" {
			// goamz List method fills in the missing
			// NextMarker field if resp.IsTruncated, so
			// now we can't really tell whether it was
			// sent by the server or by goamz. In cases
			// where it should be empty but isn't, assume
			// it's goamz's fault.
			resp.NextMarker = ""
		}

		var expectKeys []string
		var expectPrefixes []string
		var expectNextMarker string
		var expectTruncated bool
		for _, key := range allfiles {
			full := len(expectKeys)+len(expectPrefixes) >= maxKeys
			if !strings.HasPrefix(key, trial.prefix) || key < trial.marker {
				continue
			} else if idx := strings.Index(key[len(trial.prefix):], trial.delimiter); trial.delimiter != "" && idx >= 0 {
				prefix := key[:len(trial.prefix)+idx+1]
				if len(expectPrefixes) > 0 && expectPrefixes[len(expectPrefixes)-1] == prefix {
					// same prefix as previous key
				} else if full {
					expectNextMarker = key
					expectTruncated = true
				} else {
					expectPrefixes = append(expectPrefixes, prefix)
				}
			} else if full {
				if trial.delimiter != "" {
					expectNextMarker = key
				}
				expectTruncated = true
				break
			} else {
				expectKeys = append(expectKeys, key)
			}
		}

		var gotKeys []string
		for _, key := range resp.Contents {
			gotKeys = append(gotKeys, key.Key)
		}
		var gotPrefixes []string
		for _, prefix := range resp.CommonPrefixes {
			gotPrefixes = append(gotPrefixes, prefix)
		}
		commentf := check.Commentf("trial %+v markers=%d", trial, markers)
		c.Check(gotKeys, check.DeepEquals, expectKeys, commentf)
		c.Check(gotPrefixes, check.DeepEquals, expectPrefixes, commentf)
		c.Check(resp.NextMarker, check.Equals, expectNextMarker, commentf)
		c.Check(resp.IsTruncated, check.Equals, expectTruncated, commentf)
		c.Logf("=== trial %+v keys %q prefixes %q nextMarker %q", trial, gotKeys, gotPrefixes, resp.NextMarker)
	}
}