+// TestS3GetBucketLocation verifies that the GetBucketLocation API
+// (GET /?location) returns an XML LocationConstraint document naming
+// the cluster ID, for both collection and project buckets.
+func (s *IntegrationSuite) TestS3GetBucketLocation(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+ for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
+ req, err := http.NewRequest("GET", bucket.URL("/"), nil)
+ c.Check(err, check.IsNil)
+ req.Header.Set("Authorization", "AWS "+arvadostest.ActiveTokenV2+":none")
+ req.URL.RawQuery = "location"
+ resp, err := http.DefaultClient.Do(req)
+ c.Assert(err, check.IsNil)
+ c.Check(resp.Header.Get("Content-Type"), check.Equals, "application/xml")
+ buf, err := ioutil.ReadAll(resp.Body)
+ // Close the body so the keep-alive connection can be reused
+ // by the next iteration (and isn't leaked if c.Assert bails).
+ resp.Body.Close()
+ c.Assert(err, check.IsNil)
+ c.Check(string(buf), check.Equals, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<LocationConstraint><LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">zzzzz</LocationConstraint></LocationConstraint>\n")
+ }
+}
+
+// TestS3GetBucketVersioning verifies that the GetBucketVersioning API
+// (GET /?versioning) returns an empty VersioningConfiguration XML
+// document (i.e., versioning is not enabled) for both bucket types.
+func (s *IntegrationSuite) TestS3GetBucketVersioning(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+ for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
+ req, err := http.NewRequest("GET", bucket.URL("/"), nil)
+ c.Check(err, check.IsNil)
+ req.Header.Set("Authorization", "AWS "+arvadostest.ActiveTokenV2+":none")
+ req.URL.RawQuery = "versioning"
+ resp, err := http.DefaultClient.Do(req)
+ c.Assert(err, check.IsNil)
+ c.Check(resp.Header.Get("Content-Type"), check.Equals, "application/xml")
+ buf, err := ioutil.ReadAll(resp.Body)
+ // Close the body so the keep-alive connection can be reused
+ // by the next iteration (and isn't leaked if c.Assert bails).
+ resp.Body.Close()
+ c.Assert(err, check.IsNil)
+ c.Check(string(buf), check.Equals, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"/>\n")
+ }
+}
+
+// TestS3UnsupportedAPIs checks that unimplemented S3 APIs (ACL and
+// tagging operations) are rejected with an InvalidRequest / "API not
+// supported" XML error response, rather than being silently accepted
+// or misrouted to another handler.
+func (s *IntegrationSuite) TestS3UnsupportedAPIs(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+ for _, trial := range []struct {
+ method string
+ path string
+ rawquery string
+ }{
+ {"GET", "/", "acl&versionId=1234"}, // GetBucketAcl
+ {"GET", "/foo", "acl&versionId=1234"}, // GetObjectAcl
+ {"PUT", "/", "acl"}, // PutBucketAcl
+ {"PUT", "/foo", "acl"}, // PutObjectAcl
+ {"DELETE", "/", "tagging"}, // DeleteBucketTagging
+ {"DELETE", "/foo", "tagging"}, // DeleteObjectTagging
+ } {
+ for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
+ c.Logf("trial %v bucket %v", trial, bucket)
+ req, err := http.NewRequest(trial.method, bucket.URL(trial.path), nil)
+ c.Check(err, check.IsNil)
+ req.Header.Set("Authorization", "AWS "+arvadostest.ActiveTokenV2+":none")
+ req.URL.RawQuery = trial.rawquery
+ resp, err := http.DefaultClient.Do(req)
+ c.Assert(err, check.IsNil)
+ c.Check(resp.Header.Get("Content-Type"), check.Equals, "application/xml")
+ buf, err := ioutil.ReadAll(resp.Body)
+ // Close the body so the keep-alive connection can be reused
+ // by the next trial (and isn't leaked if c.Assert bails).
+ resp.Body.Close()
+ c.Assert(err, check.IsNil)
+ c.Check(string(buf), check.Matches, "(?ms).*InvalidRequest.*API not supported.*")
+ }
+ }
+}
+
+// If there are no CommonPrefixes entries, the CommonPrefixes XML tag
+// should not appear at all. (Uses a prefix that matches nothing, so
+// the delimiter cannot produce any rolled-up prefixes.)
+func (s *IntegrationSuite) TestS3ListNoCommonPrefixes(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+
+ req, err := http.NewRequest("GET", stage.collbucket.URL("/"), nil)
+ c.Assert(err, check.IsNil)
+ req.Header.Set("Authorization", "AWS "+arvadostest.ActiveTokenV2+":none")
+ req.URL.RawQuery = "prefix=asdfasdfasdf&delimiter=/"
+ resp, err := http.DefaultClient.Do(req)
+ c.Assert(err, check.IsNil)
+ // Ensure the connection is released even if a later check fails.
+ defer resp.Body.Close()
+ buf, err := ioutil.ReadAll(resp.Body)
+ c.Assert(err, check.IsNil)
+ c.Check(string(buf), check.Not(check.Matches), `(?ms).*CommonPrefixes.*`)
+}
+
+// If there is no delimiter in the request, or the results are not
+// truncated, the NextMarker XML tag should not appear in the response
+// body.
+func (s *IntegrationSuite) TestS3ListNoNextMarker(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+
+ for _, query := range []string{"prefix=e&delimiter=/", ""} {
+ req, err := http.NewRequest("GET", stage.collbucket.URL("/"), nil)
+ c.Assert(err, check.IsNil)
+ req.Header.Set("Authorization", "AWS "+arvadostest.ActiveTokenV2+":none")
+ req.URL.RawQuery = query
+ resp, err := http.DefaultClient.Do(req)
+ c.Assert(err, check.IsNil)
+ buf, err := ioutil.ReadAll(resp.Body)
+ // Close the body so the keep-alive connection can be reused
+ // by the next iteration (and isn't leaked if c.Assert bails).
+ resp.Body.Close()
+ c.Assert(err, check.IsNil)
+ c.Check(string(buf), check.Not(check.Matches), `(?ms).*NextMarker.*`)
+ }
+}
+
+// List response should include KeyCount field. (With the default
+// s3setup fixture and delimiter=/, exactly 2 keys are expected at the
+// top level.)
+func (s *IntegrationSuite) TestS3ListKeyCount(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+
+ req, err := http.NewRequest("GET", stage.collbucket.URL("/"), nil)
+ c.Assert(err, check.IsNil)
+ req.Header.Set("Authorization", "AWS "+arvadostest.ActiveTokenV2+":none")
+ req.URL.RawQuery = "prefix=&delimiter=/"
+ resp, err := http.DefaultClient.Do(req)
+ c.Assert(err, check.IsNil)
+ // Ensure the connection is released even if a later check fails.
+ defer resp.Body.Close()
+ buf, err := ioutil.ReadAll(resp.Body)
+ c.Assert(err, check.IsNil)
+ c.Check(string(buf), check.Matches, `(?ms).*<KeyCount>2</KeyCount>.*`)
+}
+
+// TestS3CollectionList exercises paginated listing of a collection
+// bucket, both with and without fake "directory" marker objects.
+func (s *IntegrationSuite) TestS3CollectionList(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+
+ var markers int
+ // Trick: the range index does double duty. markers is 0 on the
+ // first pass (S3FolderObjects=false, no directory markers) and 1
+ // on the second (S3FolderObjects=true, one marker per directory),
+ // so it can be used directly in the expected-count arithmetic.
+ for markers, s.testServer.Config.cluster.Collections.S3FolderObjects = range []bool{false, true} {
+ dirs := 2
+ filesPerDir := 1001
+ // Note stage is reused across both passes; writeBigDirs writes
+ // the same dir0/dir1 file names each time, so the second pass
+ // sees the same object set (plus directory markers).
+ stage.writeBigDirs(c, dirs, filesPerDir)
+ // Total # objects is:
+ // 2 file entries from s3setup (emptyfile and sailboat.txt)
+ // +1 fake "directory" marker from s3setup (emptydir) (if enabled)
+ // +dirs fake "directory" marker from writeBigDirs (dir0/, dir1/) (if enabled)
+ // +filesPerDir*dirs file entries from writeBigDirs (dir0/file0.txt, etc.)
+ s.testS3List(c, stage.collbucket, "", 4000, markers+2+(filesPerDir+markers)*dirs)
+ s.testS3List(c, stage.collbucket, "", 131, markers+2+(filesPerDir+markers)*dirs)
+ s.testS3List(c, stage.collbucket, "dir0/", 71, filesPerDir+markers)
+ }
+}
+// testS3List pages through a bucket listing with the given prefix and
+// page size, and checks that: each page respects the (server-capped)
+// page size; truncated pages carry a NextMarker and the final page
+// does not; and the total number of distinct keys equals expectFiles.
+// It also spot-checks the reported size of sailboat.txt.
+func (s *IntegrationSuite) testS3List(c *check.C, bucket *s3.Bucket, prefix string, pageSize, expectFiles int) {
+ c.Logf("testS3List: prefix=%q pageSize=%d S3FolderObjects=%v", prefix, pageSize, s.testServer.Config.cluster.Collections.S3FolderObjects)
+ // Servers cap page size at 1000 (S3 MaxKeys limit), so requests
+ // for more still return at most 1000 entries per page.
+ expectPageSize := pageSize
+ if expectPageSize > 1000 {
+ expectPageSize = 1000
+ }
+ gotKeys := map[string]s3.Key{}
+ nextMarker := ""
+ pages := 0
+ for {
+ resp, err := bucket.List(prefix, "", nextMarker, pageSize)
+ if !c.Check(err, check.IsNil) {
+ break
+ }
+ c.Check(len(resp.Contents) <= expectPageSize, check.Equals, true)
+ // Guard against an endless pagination loop: we should never
+ // need more pages than expectFiles/expectPageSize (+1 partial).
+ if pages++; !c.Check(pages <= (expectFiles/expectPageSize)+1, check.Equals, true) {
+ break
+ }
+ for _, key := range resp.Contents {
+ gotKeys[key.Key] = key
+ if strings.Contains(key.Key, "sailboat.txt") {
+ c.Check(key.Size, check.Equals, int64(4))
+ }
+ }
+ // Last page: NextMarker must be absent; truncated pages must
+ // provide one so we can continue.
+ if !resp.IsTruncated {
+ c.Check(resp.NextMarker, check.Equals, "")
+ break
+ }
+ if !c.Check(resp.NextMarker, check.Not(check.Equals), "") {
+ break
+ }
+ nextMarker = resp.NextMarker
+ }
+ c.Check(len(gotKeys), check.Equals, expectFiles)
+}
+
+// TestS3CollectionListRollup runs the rollup listing checks twice:
+// once with fake "directory" objects disabled, once enabled.
+func (s *IntegrationSuite) TestS3CollectionListRollup(c *check.C) {
+ for _, folderObjects := range []bool{false, true} {
+ s.testServer.Config.cluster.Collections.S3FolderObjects = folderObjects
+ s.testS3CollectionListRollup(c)
+ }
+}
+
+// testS3CollectionListRollup builds a bucket with a known set of
+// objects, fetches the full key list, then replays many List requests
+// with different prefix/delimiter/marker combinations and checks the
+// server's Contents, CommonPrefixes, NextMarker, and IsTruncated
+// against a locally computed expectation model.
+func (s *IntegrationSuite) testS3CollectionListRollup(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+
+ dirs := 2
+ filesPerDir := 500
+ stage.writeBigDirs(c, dirs, filesPerDir)
+ // Add one extra top-level object so the fixture isn't only dirs.
+ err := stage.collbucket.PutReader("dingbats", &bytes.Buffer{}, 0, "application/octet-stream", s3.Private, s3.Options{})
+ c.Assert(err, check.IsNil)
+ // Collect the complete, ordered key list by paging with no
+ // delimiter; adjacent duplicates are dropped defensively.
+ var allfiles []string
+ for marker := ""; ; {
+ resp, err := stage.collbucket.List("", "", marker, 20000)
+ c.Check(err, check.IsNil)
+ for _, key := range resp.Contents {
+ if len(allfiles) == 0 || allfiles[len(allfiles)-1] != key.Key {
+ allfiles = append(allfiles, key.Key)
+ }
+ }
+ marker = resp.NextMarker
+ if marker == "" {
+ break
+ }
+ }
+ // markers = number of fake "directory" objects per directory
+ // (0 when S3FolderObjects is off, 1 when on).
+ markers := 0
+ if s.testServer.Config.cluster.Collections.S3FolderObjects {
+ markers = 1
+ }
+ c.Check(allfiles, check.HasLen, dirs*(filesPerDir+markers)+3+markers)
+
+ // Sanity-check directory markers: with markers off, no key may
+ // end in "/"; with markers on, every file's parent dir must have
+ // appeared as a marker key earlier in the listing.
+ gotDirMarker := map[string]bool{}
+ for _, name := range allfiles {
+ isDirMarker := strings.HasSuffix(name, "/")
+ if markers == 0 {
+ c.Check(isDirMarker, check.Equals, false, check.Commentf("name %q", name))
+ } else if isDirMarker {
+ gotDirMarker[name] = true
+ } else if i := strings.LastIndex(name, "/"); i >= 0 {
+ c.Check(gotDirMarker[name[:i+1]], check.Equals, true, check.Commentf("name %q", name))
+ gotDirMarker[name[:i+1]] = true // skip redundant complaints about this dir marker
+ }
+ }
+
+ for _, trial := range []struct {
+ prefix string
+ delimiter string
+ marker string
+ }{
+ {"", "", ""},
+ {"di", "/", ""},
+ {"di", "r", ""},
+ {"di", "n", ""},
+ {"dir0", "/", ""},
+ {"dir0/", "/", ""},
+ {"dir0/f", "/", ""},
+ {"dir0", "", ""},
+ {"dir0/", "", ""},
+ {"dir0/f", "", ""},
+ {"dir0", "/", "dir0/file14.txt"}, // no commonprefixes
+ {"", "", "dir0/file14.txt"}, // middle page, skip walking dir1
+ {"", "", "dir1/file14.txt"}, // middle page, skip walking dir0
+ {"", "", "dir1/file498.txt"}, // last page of results
+ {"dir1/file", "", "dir1/file498.txt"}, // last page of results, with prefix
+ {"dir1/file", "/", "dir1/file498.txt"}, // last page of results, with prefix + delimiter
+ {"dir1", "Z", "dir1/file498.txt"}, // delimiter "Z" never appears
+ {"dir2", "/", ""}, // prefix "dir2" does not exist
+ {"", "/", ""},
+ } {
+ c.Logf("\n\n=== trial %+v markers=%d", trial, markers)
+
+ maxKeys := 20
+ resp, err := stage.collbucket.List(trial.prefix, trial.delimiter, trial.marker, maxKeys)
+ c.Check(err, check.IsNil)
+ if resp.IsTruncated && trial.delimiter == "" {
+ // goamz List method fills in the missing
+ // NextMarker field if resp.IsTruncated, so
+ // now we can't really tell whether it was
+ // sent by the server or by goamz. In cases
+ // where it should be empty but isn't, assume
+ // it's goamz's fault.
+ resp.NextMarker = ""
+ }
+
+ // Compute expected results by walking allfiles and applying
+ // the same prefix/delimiter/marker/maxKeys rules the server
+ // should follow. Keys and rolled-up prefixes share the
+ // maxKeys budget.
+ var expectKeys []string
+ var expectPrefixes []string
+ var expectNextMarker string
+ var expectTruncated bool
+ for _, key := range allfiles {
+ full := len(expectKeys)+len(expectPrefixes) >= maxKeys
+ if !strings.HasPrefix(key, trial.prefix) || key < trial.marker {
+ continue
+ } else if idx := strings.Index(key[len(trial.prefix):], trial.delimiter); trial.delimiter != "" && idx >= 0 {
+ // Key contains the delimiter after the prefix: it
+ // rolls up into a CommonPrefixes entry.
+ prefix := key[:len(trial.prefix)+idx+1]
+ if len(expectPrefixes) > 0 && expectPrefixes[len(expectPrefixes)-1] == prefix {
+ // same prefix as previous key
+ } else if full {
+ expectNextMarker = key
+ expectTruncated = true
+ } else {
+ expectPrefixes = append(expectPrefixes, prefix)
+ }
+ } else if full {
+ // Page budget exhausted; NextMarker is only expected
+ // when a delimiter was given (see goamz note above).
+ if trial.delimiter != "" {
+ expectNextMarker = key
+ }
+ expectTruncated = true
+ break
+ } else {
+ expectKeys = append(expectKeys, key)
+ }
+ }
+
+ var gotKeys []string
+ for _, key := range resp.Contents {
+ gotKeys = append(gotKeys, key.Key)
+ }
+ var gotPrefixes []string
+ for _, prefix := range resp.CommonPrefixes {
+ gotPrefixes = append(gotPrefixes, prefix)
+ }
+ commentf := check.Commentf("trial %+v markers=%d", trial, markers)
+ c.Check(gotKeys, check.DeepEquals, expectKeys, commentf)
+ c.Check(gotPrefixes, check.DeepEquals, expectPrefixes, commentf)
+ c.Check(resp.NextMarker, check.Equals, expectNextMarker, commentf)
+ c.Check(resp.IsTruncated, check.Equals, expectTruncated, commentf)
+ c.Logf("=== trial %+v keys %q prefixes %q nextMarker %q", trial, gotKeys, gotPrefixes, resp.NextMarker)
+ }
+}
+
+func (s *IntegrationSuite) TestS3ListObjectsV2(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+ dirs := 2
+ filesPerDir := 40
+ stage.writeBigDirs(c, dirs, filesPerDir)
+
+ sess := aws_session.Must(aws_session.NewSession(&aws_aws.Config{
+ Region: aws_aws.String("auto"),
+ Endpoint: aws_aws.String("http://" + s.testServer.Addr),
+ Credentials: aws_credentials.NewStaticCredentials(url.QueryEscape(arvadostest.ActiveTokenV2), url.QueryEscape(arvadostest.ActiveTokenV2), ""),
+ S3ForcePathStyle: aws_aws.Bool(true),
+ }))
+
+ stringOrNil := func(s string) *string {
+ if s == "" {
+ return nil
+ } else {
+ return &s
+ }