1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
29 "git.arvados.org/arvados.git/sdk/go/arvados"
30 "git.arvados.org/arvados.git/sdk/go/ctxlog"
31 "github.com/AdRoll/goamz/s3"
36 s3SignAlgorithm = "AWS4-HMAC-SHA256"
37 s3MaxClockSkew = 5 * time.Minute
// commonPrefix wraps a single CommonPrefixes entry; used instead of
// a plain string slice so a nil/empty list marshals with no element
// at all (see comment inside listV1Resp).
40 type commonPrefix struct {
// listV1Resp is the XML response body for a ListObjects (V1) request.
44 type listV1Resp struct {
45 XMLName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
47 // s3.ListResp marshals an empty tag when
48 // CommonPrefixes is nil, which confuses some clients.
49 // Fix by using this nested struct instead.
50 CommonPrefixes []commonPrefix
51 // Similarly, we need omitempty here, because an empty
52 // tag confuses some clients (e.g.,
53 // github.com/aws/aws-sdk-net never terminates its
55 NextMarker string `xml:"NextMarker,omitempty"`
56 // ListObjectsV2 has a KeyCount response field.
// listV2Resp is the XML response body for a ListObjectsV2 request.
// The omitempty tags suppress elements that AWS omits when the
// corresponding request parameter was not given.
60 type listV2Resp struct {
61 XMLName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
68 CommonPrefixes []commonPrefix
69 EncodingType string `xml:",omitempty"`
71 ContinuationToken string `xml:",omitempty"`
72 NextContinuationToken string `xml:",omitempty"`
73 StartAfter string `xml:",omitempty"`
// hmacstring computes HMAC-SHA256 of msg with the given key
// (building block for the AWS V4 signing-key derivation below).
76 func hmacstring(msg string, key []byte) []byte {
77 h := hmac.New(sha256.New, key)
78 io.WriteString(h, msg)
// hashdigest feeds payload into h and returns the lowercase-hex
// digest, as required by the AWS V4 signature format.
82 func hashdigest(h hash.Hash, payload string) string {
83 io.WriteString(h, payload)
84 return fmt.Sprintf("%x", h.Sum(nil))
87 // Signing key for given secret key and request attrs.
// Implements the standard AWS Signature V4 key derivation:
// HMAC("aws4_request", HMAC(service, HMAC(region, HMAC(date, "AWS4"+key)))).
88 func s3signatureKey(key, datestamp, regionName, serviceName string) []byte {
89 return hmacstring("aws4_request",
90 hmacstring(serviceName,
91 hmacstring(regionName,
92 hmacstring(datestamp, []byte("AWS4"+key)))))
95 // Canonical query string for S3 V4 signature: sorted keys, spaces
96 // escaped as %20 instead of +, keyvalues joined with &.
97 func s3querystring(u *url.URL) string {
98 keys := make([]string, 0, len(u.Query()))
99 values := make(map[string]string, len(u.Query()))
100 for k, vs := range u.Query() {
// Escape key per V4 rules: QueryEscape, but "+" must become "%20".
101 k = strings.Replace(url.QueryEscape(k), "+", "%20", -1)
102 keys = append(keys, k)
103 for _, v := range vs {
104 v = strings.Replace(url.QueryEscape(v), "+", "%20", -1)
// Accumulate "key=value" pairs for this escaped key.
108 values[k] += k + "=" + v
// NOTE(review): keys is presumably sorted before this loop and each
// entry replaced by its accumulated "k=v" string — confirm against
// the elided lines between here and the join below.
112 for i, k := range keys {
115 return strings.Join(keys, "&")
// reMultipleSlashChars matches runs of 2+ slashes, used to collapse
// "//" sequences when normalizing request paths.
118 var reMultipleSlashChars = regexp.MustCompile(`//+`)
// s3stringToSign builds the AWS V4 "string to sign" for request r:
// algorithm, request timestamp, credential scope, and the SHA-256 of
// the canonical request. Returns an error if the timestamp is
// malformed or outside the allowed clock skew.
120 func s3stringToSign(alg, scope, signedHeaders string, r *http.Request) (string, error) {
// Prefer X-Amz-Date (basic ISO 8601); fall back to the Date header
// in RFC1123 format when X-Amz-Date is absent.
121 timefmt, timestr := "20060102T150405Z", r.Header.Get("X-Amz-Date")
123 timefmt, timestr = time.RFC1123, r.Header.Get("Date")
125 t, err := time.Parse(timefmt, timestr)
127 return "", fmt.Errorf("invalid timestamp %q: %s", timestr, err)
// Reject requests dated more than s3MaxClockSkew in either direction.
129 if skew := time.Now().Sub(t); skew < -s3MaxClockSkew || skew > s3MaxClockSkew {
130 return "", errors.New("exceeded max clock skew")
// Canonical headers: each signed header as "name:value\n"; the
// "host" pseudo-header comes from r.Host rather than r.Header.
133 var canonicalHeaders string
134 for _, h := range strings.Split(signedHeaders, ";") {
136 canonicalHeaders += h + ":" + r.Host + "\n"
138 canonicalHeaders += h + ":" + r.Header.Get(h) + "\n"
142 normalizedPath := normalizePath(r.URL.Path)
143 ctxlog.FromContext(r.Context()).Debugf("normalizedPath %q", normalizedPath)
144 canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", r.Method, normalizedPath, s3querystring(r.URL), canonicalHeaders, signedHeaders, r.Header.Get("X-Amz-Content-Sha256"))
145 ctxlog.FromContext(r.Context()).Debugf("s3stringToSign: canonicalRequest %s", canonicalRequest)
146 return fmt.Sprintf("%s\n%s\n%s\n%s", alg, r.Header.Get("X-Amz-Date"), scope, hashdigest(sha256.New(), canonicalRequest)), nil
// normalizePath percent-encodes every byte of s except unreserved
// RFC 3986 characters and "/", after collapsing consecutive slashes.
149 func normalizePath(s string) string {
150 // (url.URL).EscapedPath() would be incorrect here. AWS
151 // documentation specifies the URL path should be normalized
152 // according to RFC 3986, i.e., unescaping ALPHA / DIGIT / "-"
153 // / "." / "_" / "~". The implication is that everything other
154 // than those chars (and "/") _must_ be percent-encoded --
155 // even chars like ";" and "," that are not normally
156 // percent-encoded in paths.
158 for _, c := range []byte(reMultipleSlashChars.ReplaceAllString(s, "/")) {
159 if (c >= 'a' && c <= 'z') ||
160 (c >= 'A' && c <= 'Z') ||
161 (c >= '0' && c <= '9') ||
// Anything not matched above is emitted as %XX (uppercase hex).
169 out += fmt.Sprintf("%%%02X", c)
// s3signature computes the expected V4 signature (hex HMAC-SHA256 of
// stringToSign keyed by the derived signing key) for the given
// credential scope. Returns an error if scope is malformed.
175 func s3signature(secretKey, scope, signedHeaders, stringToSign string) (string, error) {
176 // scope is {datestamp}/{region}/{service}/aws4_request
177 drs := strings.Split(scope, "/")
179 return "", fmt.Errorf("invalid scope %q", scope)
181 key := s3signatureKey(secretKey, drs[0], drs[1], drs[2])
182 return hashdigest(hmac.New(sha256.New, key), stringToSign), nil
// v2tokenUnderscore matches a full Arvados v2 token whose "/"
// separators have been replaced with "_" (see unescapeKey).
185 var v2tokenUnderscore = regexp.MustCompile(`^v2_[a-z0-9]{5}-gj3su-[a-z0-9]{15}_`)
// unescapeKey recovers an Arvados token from an S3 access-key string:
// either an underscore-mangled v2 token, or a URL-path-escaped value.
187 func unescapeKey(key string) string {
188 if v2tokenUnderscore.MatchString(key) {
189 // Entire Arvados token, with "/" replaced by "_" to
190 // avoid colliding with the Authorization header
192 return strings.Replace(key, "_", "/", -1)
193 } else if s, err := url.PathUnescape(key); err == nil {
200 // checks3signature verifies the given S3 V4 signature and returns the
201 // Arvados token that corresponds to the given accessKey. An error is
202 // returned if accessKey is not a valid token UUID or the signature
204 func (h *handler) checks3signature(r *http.Request) (string, error) {
205 var key, scope, signedHeaders, signature string
// Parse the "AWS4-HMAC-SHA256 Credential=..., SignedHeaders=...,
// Signature=..." Authorization header into its components.
206 authstring := strings.TrimPrefix(r.Header.Get("Authorization"), s3SignAlgorithm+" ")
207 for _, cmpt := range strings.Split(authstring, ",") {
208 cmpt = strings.TrimSpace(cmpt)
209 split := strings.SplitN(cmpt, "=", 2)
211 case len(split) != 2:
213 case split[0] == "Credential":
// Credential is "{accesskey}/{scope}"; scope itself contains "/"
// separators, so split only once.
214 keyandscope := strings.SplitN(split[1], "/", 2)
215 if len(keyandscope) == 2 {
216 key, scope = keyandscope[0], keyandscope[1]
218 case split[0] == "SignedHeaders":
219 signedHeaders = split[1]
220 case split[0] == "Signature":
225 client := (&arvados.Client{
226 APIHost: h.Cluster.Services.Controller.ExternalURL.Host,
227 Insecure: h.Cluster.TLS.Insecure,
228 }).WithRequestID(r.Header.Get("X-Request-Id"))
229 var aca arvados.APIClientAuthorization
232 if len(key) == 27 && key[5:12] == "-gj3su-" {
233 // Access key is the UUID of an Arvados token, secret
234 // key is the secret part.
// Look up the token by UUID using the cluster's root token.
235 ctx := arvados.ContextWithAuthorization(r.Context(), "Bearer "+h.Cluster.SystemRootToken)
236 err = client.RequestAndDecodeContext(ctx, &aca, "GET", "arvados/v1/api_client_authorizations/"+key, nil, nil)
237 secret = aca.APIToken
239 // Access key and secret key are both an entire
240 // Arvados token or OIDC access token.
241 ctx := arvados.ContextWithAuthorization(r.Context(), "Bearer "+unescapeKey(key))
242 err = client.RequestAndDecodeContext(ctx, &aca, "GET", "arvados/v1/api_client_authorizations/current", nil, nil)
246 ctxlog.FromContext(r.Context()).WithError(err).WithField("UUID", key).Info("token lookup failed")
247 return "", errors.New("invalid access key")
// Recompute the signature with the recovered secret and compare
// against the one the client sent.
249 stringToSign, err := s3stringToSign(s3SignAlgorithm, scope, signedHeaders, r)
253 expect, err := s3signature(secret, scope, signedHeaders, stringToSign)
256 } else if expect != signature {
257 return "", fmt.Errorf("signature does not match (scope %q signedHeaders %q stringToSign %q)", scope, signedHeaders, stringToSign)
259 return aca.TokenV2(), nil
// s3ErrorResponse writes an S3-style XML error document with the
// given S3 error code, message, and resource, using HTTP status code.
262 func s3ErrorResponse(w http.ResponseWriter, s3code string, message string, resource string, code int) {
263 w.Header().Set("Content-Type", "application/xml")
264 w.Header().Set("X-Content-Type-Options", "nosniff")
266 var errstruct struct {
272 errstruct.Code = s3code
273 errstruct.Message = message
274 errstruct.Resource = resource
275 errstruct.RequestId = ""
276 enc := xml.NewEncoder(w)
277 fmt.Fprint(w, xml.Header)
// NOTE(review): EncodeElement's error is discarded; a failed write
// here is silently dropped. Probably acceptable for a best-effort
// error response, but worth confirming intent.
278 enc.EncodeElement(errstruct, xml.StartElement{Name: xml.Name{Local: "Error"}})
// Standard S3 error code strings used in s3ErrorResponse calls.
281 var NoSuchKey = "NoSuchKey"
282 var NoSuchBucket = "NoSuchBucket"
283 var InvalidArgument = "InvalidArgument"
284 var InternalError = "InternalError"
285 var UnauthorizedAccess = "UnauthorizedAccess"
286 var InvalidRequest = "InvalidRequest"
287 var SignatureDoesNotMatch = "SignatureDoesNotMatch"
// reRawQueryIndicatesAPI matches a raw query string that starts with a
// bare lowercase word (e.g. "?website", "?tagging&..."), which S3
// clients use to invoke bucket/object sub-APIs we don't support.
289 var reRawQueryIndicatesAPI = regexp.MustCompile(`^[a-z]+(&|$)`)
291 // serveS3 handles r and returns true if r is a request from an S3
292 // client, otherwise it returns false.
293 func (h *handler) serveS3(w http.ResponseWriter, r *http.Request) bool {
// Authentication: either legacy "AWS key:secret" (V2-style header,
// access key holds the token) or "AWS4-HMAC-SHA256 ..." (full V4
// signature verification via checks3signature).
295 if auth := r.Header.Get("Authorization"); strings.HasPrefix(auth, "AWS ") {
296 split := strings.SplitN(auth[4:], ":", 2)
298 s3ErrorResponse(w, InvalidRequest, "malformed Authorization header", r.URL.Path, http.StatusUnauthorized)
301 token = unescapeKey(split[0])
302 } else if strings.HasPrefix(auth, s3SignAlgorithm+" ") {
303 t, err := h.checks3signature(r)
305 s3ErrorResponse(w, SignatureDoesNotMatch, "signature verification failed: "+err.Error(), r.URL.Path, http.StatusForbidden)
313 fs, sess, tokenUser, err := h.Cache.GetSession(token)
315 s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
319 if writeMethod[r.Method] {
320 // Create a FileSystem for this request, to avoid
321 // exposing incomplete write operations to concurrent
323 client := sess.client.WithRequestID(r.Header.Get("X-Request-Id"))
324 fs = client.SiteFileSystem(sess.keepclient)
325 fs.ForwardSlashNameSubstitution(h.Cluster.Collections.ForwardSlashNameSubstitution)
// Work out bucket name and whether the path names an object inside
// the bucket (vs. the bucket itself). Virtual-host style requests
// carry the collection ID in the Host header.
328 var objectNameGiven bool
329 var bucketName string
331 if id := arvados.CollectionIDFromDNSName(r.Host); id != "" {
334 objectNameGiven = strings.Count(strings.TrimSuffix(r.URL.Path, "/"), "/") > 0
336 bucketName = strings.SplitN(strings.TrimPrefix(r.URL.Path, "/"), "/", 2)[0]
337 objectNameGiven = strings.Count(strings.TrimSuffix(r.URL.Path, "/"), "/") > 1
339 fspath += reMultipleSlashChars.ReplaceAllString(r.URL.Path, "/")
342 case r.Method == http.MethodGet && !objectNameGiven:
343 // Path is "/{uuid}" or "/{uuid}/", has no object name
344 if _, ok := r.URL.Query()["versioning"]; ok {
345 // GetBucketVersioning
346 w.Header().Set("Content-Type", "application/xml")
347 io.WriteString(w, xml.Header)
348 fmt.Fprintln(w, `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`)
349 } else if _, ok = r.URL.Query()["location"]; ok {
351 w.Header().Set("Content-Type", "application/xml")
352 io.WriteString(w, xml.Header)
353 fmt.Fprintln(w, `<LocationConstraint><LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">`+
355 `</LocationConstraint></LocationConstraint>`)
356 } else if reRawQueryIndicatesAPI.MatchString(r.URL.RawQuery) {
357 // GetBucketWebsite ("GET /bucketid/?website"), GetBucketTagging, etc.
358 s3ErrorResponse(w, InvalidRequest, "API not supported", r.URL.Path+"?"+r.URL.RawQuery, http.StatusBadRequest)
// Default: ListObjects / ListObjectsV2.
361 h.s3list(bucketName, w, r, fs)
364 case r.Method == http.MethodGet || r.Method == http.MethodHead:
365 if reRawQueryIndicatesAPI.MatchString(r.URL.RawQuery) {
366 // GetObjectRetention ("GET /bucketid/objectid?retention&versionID=..."), etc.
367 s3ErrorResponse(w, InvalidRequest, "API not supported", r.URL.Path+"?"+r.URL.RawQuery, http.StatusBadRequest)
370 fi, err := fs.Stat(fspath)
// HEAD on the bucket itself: 200 if it exists as a directory,
// 404 (NoSuchBucket) if not.
371 if r.Method == "HEAD" && !objectNameGiven {
373 if err == nil && fi.IsDir() {
374 err = setFileInfoHeaders(w.Header(), fs, fspath)
376 s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusBadGateway)
379 w.WriteHeader(http.StatusOK)
380 } else if os.IsNotExist(err) {
381 s3ErrorResponse(w, NoSuchBucket, "The specified bucket does not exist.", r.URL.Path, http.StatusNotFound)
383 s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusBadGateway)
// "Folder object" support: a GET/HEAD of "name/" that resolves to a
// directory is answered as an empty x-directory object.
387 if err == nil && fi.IsDir() && objectNameGiven && strings.HasSuffix(fspath, "/") && h.Cluster.Collections.S3FolderObjects {
388 err = setFileInfoHeaders(w.Header(), fs, fspath)
390 s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusBadGateway)
393 w.Header().Set("Content-Type", "application/x-directory")
394 w.WriteHeader(http.StatusOK)
// Directories (without folder-object handling) and nonexistent
// paths are both reported as NoSuchKey.
397 if os.IsNotExist(err) ||
398 (err != nil && err.Error() == "not a directory") ||
399 (fi != nil && fi.IsDir()) {
400 s3ErrorResponse(w, NoSuchKey, "The specified key does not exist.", r.URL.Path, http.StatusNotFound)
404 if !h.userPermittedToUploadOrDownload(r.Method, tokenUser) {
405 http.Error(w, "Not permitted", http.StatusForbidden)
408 h.logUploadOrDownload(r, sess.arvadosclient, fs, fspath, nil, tokenUser)
410 // shallow copy r, and change URL path
413 err = setFileInfoHeaders(w.Header(), fs, fspath)
415 s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusBadGateway)
// Delegate range requests, conditional headers, etc. to net/http.
418 http.FileServer(fs).ServeHTTP(w, &r)
420 case r.Method == http.MethodPut:
421 if reRawQueryIndicatesAPI.MatchString(r.URL.RawQuery) {
422 // PutObjectAcl ("PUT /bucketid/objectid?acl&versionID=..."), etc.
423 s3ErrorResponse(w, InvalidRequest, "API not supported", r.URL.Path+"?"+r.URL.RawQuery, http.StatusBadRequest)
426 if !objectNameGiven {
427 s3ErrorResponse(w, InvalidArgument, "Missing object name in PUT request.", r.URL.Path, http.StatusBadRequest)
// Trailing-slash PUT creates a "folder object"; only allowed when
// enabled by config, and only with an empty body or an explicit
// application/x-directory Content-Type.
431 if strings.HasSuffix(fspath, "/") {
432 if !h.Cluster.Collections.S3FolderObjects {
433 s3ErrorResponse(w, InvalidArgument, "invalid object name: trailing slash", r.URL.Path, http.StatusBadRequest)
// Probe one byte of the body to detect non-empty content.
436 n, err := r.Body.Read(make([]byte, 1))
437 if err != nil && err != io.EOF {
438 s3ErrorResponse(w, InternalError, fmt.Sprintf("error reading request body: %s", err), r.URL.Path, http.StatusInternalServerError)
441 s3ErrorResponse(w, InvalidArgument, "cannot create object with trailing '/' char unless content is empty", r.URL.Path, http.StatusBadRequest)
443 } else if strings.SplitN(r.Header.Get("Content-Type"), ";", 2)[0] != "application/x-directory" {
444 s3ErrorResponse(w, InvalidArgument, "cannot create object with trailing '/' char unless Content-Type is 'application/x-directory'", r.URL.Path, http.StatusBadRequest)
447 // Given PUT "foo/bar/", we'll use "foo/bar/."
448 // in the "ensure parents exist" block below,
449 // and then we'll be done.
453 fi, err := fs.Stat(fspath)
454 if err != nil && err.Error() == "not a directory" {
455 // requested foo/bar, but foo is a file
456 s3ErrorResponse(w, InvalidArgument, "object name conflicts with existing object", r.URL.Path, http.StatusBadRequest)
459 if strings.HasSuffix(r.URL.Path, "/") && err == nil && !fi.IsDir() {
460 // requested foo/bar/, but foo/bar is a file
461 s3ErrorResponse(w, InvalidArgument, "object name conflicts with existing object", r.URL.Path, http.StatusBadRequest)
464 // create missing parent/intermediate directories, if any
465 for i, c := range fspath {
466 if i > 0 && c == '/' {
468 if strings.HasSuffix(dir, "/") {
469 err = errors.New("invalid object name (consecutive '/' chars)")
470 s3ErrorResponse(w, InvalidArgument, err.Error(), r.URL.Path, http.StatusBadRequest)
473 err = fs.Mkdir(dir, 0755)
474 if errors.Is(err, arvados.ErrInvalidArgument) || errors.Is(err, arvados.ErrInvalidOperation) {
475 // Cannot create a directory
477 err = fmt.Errorf("mkdir %q failed: %w", dir, err)
478 s3ErrorResponse(w, InvalidArgument, err.Error(), r.URL.Path, http.StatusBadRequest)
480 } else if err != nil && !os.IsExist(err) {
481 err = fmt.Errorf("mkdir %q failed: %w", dir, err)
482 s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
488 f, err := fs.OpenFile(fspath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)
// NOTE(review): the OpenFile is retried with identical arguments
// after IsNotExist — presumably something in the elided lines in
// between changes state (confirm against full source).
489 if os.IsNotExist(err) {
490 f, err = fs.OpenFile(fspath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)
493 err = fmt.Errorf("open %q failed: %w", r.URL.Path, err)
494 s3ErrorResponse(w, InvalidArgument, err.Error(), r.URL.Path, http.StatusBadRequest)
499 if !h.userPermittedToUploadOrDownload(r.Method, tokenUser) {
500 http.Error(w, "Not permitted", http.StatusForbidden)
503 h.logUploadOrDownload(r, sess.arvadosclient, fs, fspath, nil, tokenUser)
505 _, err = io.Copy(f, r.Body)
507 err = fmt.Errorf("write to %q failed: %w", r.URL.Path, err)
508 s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusBadGateway)
513 err = fmt.Errorf("write to %q failed: close: %w", r.URL.Path, err)
514 s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusBadGateway)
// Persist the write by syncing this request's snapshot into the
// shared (read) filesystem.
518 err = h.syncCollection(fs, readfs, fspath)
520 err = fmt.Errorf("sync failed: %w", err)
521 s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
524 w.WriteHeader(http.StatusOK)
526 case r.Method == http.MethodDelete:
527 if reRawQueryIndicatesAPI.MatchString(r.URL.RawQuery) {
528 // DeleteObjectTagging ("DELETE /bucketid/objectid?tagging&versionID=..."), etc.
529 s3ErrorResponse(w, InvalidRequest, "API not supported", r.URL.Path+"?"+r.URL.RawQuery, http.StatusBadRequest)
532 if !objectNameGiven || r.URL.Path == "/" {
533 s3ErrorResponse(w, InvalidArgument, "missing object name in DELETE request", r.URL.Path, http.StatusBadRequest)
// S3 DELETE is idempotent: deleting a nonexistent key returns 204.
536 if strings.HasSuffix(fspath, "/") {
537 fspath = strings.TrimSuffix(fspath, "/")
538 fi, err := fs.Stat(fspath)
539 if os.IsNotExist(err) {
540 w.WriteHeader(http.StatusNoContent)
542 } else if err != nil {
543 s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
545 } else if !fi.IsDir() {
546 // if "foo" exists and is a file, then
547 // "foo/" doesn't exist, so we say
548 // delete was successful.
549 w.WriteHeader(http.StatusNoContent)
552 } else if fi, err := fs.Stat(fspath); err == nil && fi.IsDir() {
553 // if "foo" is a dir, it is visible via S3
554 // only as "foo/", not "foo" -- so we leave
555 // the dir alone and return 204 to indicate
556 // that "foo" does not exist.
557 w.WriteHeader(http.StatusNoContent)
560 err = fs.Remove(fspath)
561 if os.IsNotExist(err) {
562 w.WriteHeader(http.StatusNoContent)
566 err = fmt.Errorf("rm failed: %w", err)
567 s3ErrorResponse(w, InvalidArgument, err.Error(), r.URL.Path, http.StatusBadRequest)
570 err = h.syncCollection(fs, readfs, fspath)
572 err = fmt.Errorf("sync failed: %w", err)
573 s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
576 w.WriteHeader(http.StatusNoContent)
579 s3ErrorResponse(w, InvalidRequest, "method not allowed", r.URL.Path, http.StatusMethodNotAllowed)
// syncCollection copies the current state of the collection
// containing path from srcfs into dstfs, by snapshotting the
// collection directory in srcfs and splicing it into dstfs.
584 func (h *handler) syncCollection(srcfs, dstfs arvados.CustomFileSystem, path string) error {
585 coll, _ := h.determineCollection(srcfs, path)
586 if coll == nil || coll.UUID == "" {
587 return errors.New("could not determine collection to sync")
589 d, err := srcfs.OpenFile("by_id/"+coll.UUID, os.O_RDWR, 0777)
595 snap, err := d.Snapshot()
599 dstd, err := dstfs.OpenFile("by_id/"+coll.UUID, os.O_RDWR, 0777)
604 return dstd.Splice(snap)
// setFileInfoHeaders adds x-amz-meta-* headers (and related metadata)
// for the collection/file at path, MIME-B-encoding any property value
// that contains non-ASCII or control characters.
607 func setFileInfoHeaders(header http.Header, fs arvados.CustomFileSystem, path string) error {
// maybeEncode returns s unchanged if it is printable ASCII,
// otherwise RFC 2047 B-encoded so it is safe in a header value.
608 maybeEncode := func(s string) string {
609 for _, c := range s {
610 if c > '\u007f' || c < ' ' {
611 return mime.BEncoding.Encode("UTF-8", s)
616 path = strings.TrimSuffix(path, "/")
617 var props map[string]interface{}
619 fi, err := fs.Stat(path)
// Collection-level properties come from the FileInfo's Sys() value.
623 switch src := fi.Sys().(type) {
624 case *arvados.Collection:
625 props = src.Properties
627 props = src.Properties
629 if err, ok := src.(error); ok {
// Walk up to the parent path (elided lines presumably retry the
// property lookup there — confirm against full source).
633 cut := strings.LastIndexByte(path, '/')
642 for k, v := range props {
643 if !validMIMEHeaderKey(k) {
646 k = "x-amz-meta-" + k
647 if s, ok := v.(string); ok {
648 header.Set(k, maybeEncode(s))
649 } else if j, err := json.Marshal(v); err == nil {
// Non-string property values are serialized as JSON.
650 header.Set(k, maybeEncode(string(j)))
// validMIMEHeaderKey reports whether k can safely be used as a MIME
// header key, by checking whether canonicalization alters a probe
// string derived from it.
656 func validMIMEHeaderKey(k string) bool {
658 return check != textproto.CanonicalMIMEHeaderKey(check)
661 // Call fn on the given path (directory) and its contents, in
662 // lexicographic order.
664 // If isRoot==true and path is not a directory, return nil.
666 // If fn returns filepath.SkipDir when called on a directory, don't
667 // descend into that directory.
668 func walkFS(fs arvados.CustomFileSystem, path string, isRoot bool, fn func(path string, fi os.FileInfo) error) error {
670 fi, err := fs.Stat(path)
671 if os.IsNotExist(err) || (err == nil && !fi.IsDir()) {
673 } else if err != nil {
677 if err == filepath.SkipDir {
679 } else if err != nil {
683 f, err := fs.Open(path)
684 if os.IsNotExist(err) && isRoot {
686 } else if err != nil {
687 return fmt.Errorf("open %q: %w", path, err)
693 fis, err := f.Readdir(-1)
// Sort entries so fn sees keys in lexicographic (S3 listing) order.
697 sort.Slice(fis, func(i, j int) bool { return fis[i].Name() < fis[j].Name() })
698 for _, fi := range fis {
699 err = fn(path+"/"+fi.Name(), fi)
700 if err == filepath.SkipDir {
// SkipDir from fn suppresses recursion into this entry only.
702 } else if err != nil {
706 err = walkFS(fs, path+"/"+fi.Name(), false, fn)
// errDone is a sentinel returned by the walk callback to stop the
// walk early once the listing is complete; it is not a real error.
715 var errDone = errors.New("done")
// s3list handles ListObjects (V1) and ListObjectsV2 requests for the
// given bucket, walking the bucket's directory tree and writing an
// XML ListBucketResult response.
717 func (h *handler) s3list(bucket string, w http.ResponseWriter, r *http.Request, fs arvados.CustomFileSystem) {
723 marker string // decoded continuationToken (v2) or provided by client (v1)
724 startAfter string // v2
725 continuationToken string // v2
726 encodingTypeURL bool // v2
728 params.delimiter = r.FormValue("delimiter")
// Clamp max-keys to (0, s3MaxKeys]; invalid or oversized values
// fall back to the server-side maximum.
729 if mk, _ := strconv.ParseInt(r.FormValue("max-keys"), 10, 64); mk > 0 && mk < s3MaxKeys {
730 params.maxKeys = int(mk)
732 params.maxKeys = s3MaxKeys
734 params.prefix = r.FormValue("prefix")
735 switch r.FormValue("list-type") {
740 http.Error(w, "invalid list-type parameter", http.StatusBadRequest)
// V2-only parameters: continuation-token (base64-encoded marker),
// start-after, encoding-type.
744 params.continuationToken = r.FormValue("continuation-token")
745 marker, err := base64.StdEncoding.DecodeString(params.continuationToken)
747 http.Error(w, "invalid continuation token", http.StatusBadRequest)
750 params.marker = string(marker)
751 params.startAfter = r.FormValue("start-after")
752 switch r.FormValue("encoding-type") {
755 params.encodingTypeURL = true
757 http.Error(w, "invalid encoding-type parameter", http.StatusBadRequest)
761 params.marker = r.FormValue("marker")
764 bucketdir := "by_id/" + bucket
765 // walkpath is the directory (relative to bucketdir) we need
766 // to walk: the innermost directory that is guaranteed to
767 // contain all paths that have the requested prefix. Examples:
768 // prefix "foo/bar" => walkpath "foo"
769 // prefix "foo/bar/" => walkpath "foo/bar"
770 // prefix "foo" => walkpath ""
771 // prefix "" => walkpath ""
772 walkpath := params.prefix
773 if cut := strings.LastIndex(walkpath, "/"); cut >= 0 {
774 walkpath = walkpath[:cut]
781 Prefix: params.prefix,
782 Delimiter: params.delimiter,
783 MaxKeys: params.maxKeys,
784 ContinuationToken: r.FormValue("continuation-token"),
785 StartAfter: params.startAfter,
789 commonPrefixes := map[string]bool{}
790 err := walkFS(fs, strings.TrimSuffix(bucketdir+"/"+walkpath, "/"), true, func(path string, fi os.FileInfo) error {
791 if path == bucketdir {
// Make path relative to the bucket root.
794 path = path[len(bucketdir)+1:]
795 filesize := fi.Size()
// Prefix pruning: compare the walked path against the requested
// prefix to decide whether to skip, descend, or stop entirely.
800 if len(path) <= len(params.prefix) {
801 if path > params.prefix[:len(path)] {
802 // with prefix "foobar", walking "fooz" means we're done
805 if path < params.prefix[:len(path)] {
806 // with prefix "foobag", walking "foobag" is pointless
807 return filepath.SkipDir
809 if fi.IsDir() && !strings.HasPrefix(params.prefix+"/", path) {
810 // with prefix "foo/bar", walking "fo"
811 // is pointless (but walking "foo" or
812 // "foo/bar" is necessary)
813 return filepath.SkipDir
815 if len(path) < len(params.prefix) {
816 // can't skip anything, and this entry
817 // isn't in the results, so just
822 if path[:len(params.prefix)] > params.prefix {
823 // with prefix "foobar", nothing we
824 // see after "foozzz" is relevant
// Skip entries at or before the marker/start-after position.
828 if path < params.marker || path < params.prefix || path <= params.startAfter {
831 if fi.IsDir() && !h.Cluster.Collections.S3FolderObjects {
832 // Note we don't add anything to
833 // commonPrefixes here even if delimiter is
834 // "/". We descend into the directory, and
835 // return a commonPrefix only if we end up
836 // finding a regular file inside it.
// Truncate the listing once maxKeys entries have been collected.
839 if len(resp.Contents)+len(commonPrefixes) >= params.maxKeys {
840 resp.IsTruncated = true
841 if params.delimiter != "" || params.v2 {
846 if params.delimiter != "" {
847 idx := strings.Index(path[len(params.prefix):], params.delimiter)
849 // with prefix "foobar" and delimiter
850 // "z", when we hit "foobar/baz", we
851 // add "/baz" to commonPrefixes and
853 commonPrefixes[path[:len(params.prefix)+idx+1]] = true
854 return filepath.SkipDir
857 resp.Contents = append(resp.Contents, s3.Key{
// S3 timestamp format with trailing "Z" (UTC).
859 LastModified: fi.ModTime().UTC().Format("2006-01-02T15:04:05.999") + "Z",
864 if err != nil && err != errDone {
865 http.Error(w, err.Error(), http.StatusInternalServerError)
868 if params.delimiter != "" {
869 resp.CommonPrefixes = make([]commonPrefix, 0, len(commonPrefixes))
870 for prefix := range commonPrefixes {
871 resp.CommonPrefixes = append(resp.CommonPrefixes, commonPrefix{prefix})
873 sort.Slice(resp.CommonPrefixes, func(i, j int) bool { return resp.CommonPrefixes[i].Prefix < resp.CommonPrefixes[j].Prefix })
875 resp.KeyCount = len(resp.Contents)
876 var respV1orV2 interface{}
878 if params.encodingTypeURL {
879 // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
880 // "If you specify the encoding-type request
881 // parameter, Amazon S3 includes this element in the
882 // response, and returns encoded key name values in
883 // the following response elements:
885 // Delimiter, Prefix, Key, and StartAfter.
889 // Valid Values: url"
891 // This is somewhat vague but in practice it appears
892 // to mean x-www-form-urlencoded as in RFC1866 8.2.1
893 // para 1 (encode space as "+") rather than straight
894 // percent-encoding as in RFC1738 2.2. Presumably,
895 // the intent is to allow the client to decode XML and
896 // then paste the strings directly into another URI
897 // query or POST form like "https://host/path?foo=" +
898 // foo + "&bar=" + bar.
899 resp.EncodingType = "url"
900 resp.Delimiter = url.QueryEscape(resp.Delimiter)
901 resp.Prefix = url.QueryEscape(resp.Prefix)
902 resp.StartAfter = url.QueryEscape(resp.StartAfter)
903 for i, ent := range resp.Contents {
904 ent.Key = url.QueryEscape(ent.Key)
905 resp.Contents[i] = ent
907 for i, ent := range resp.CommonPrefixes {
908 ent.Prefix = url.QueryEscape(ent.Prefix)
909 resp.CommonPrefixes[i] = ent
// V2 responses base64-encode the next marker as the continuation
// token; V1 responses use the legacy s3.ListResp shape instead.
914 resp.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextMarker))
917 respV1orV2 = listV1Resp{
918 CommonPrefixes: resp.CommonPrefixes,
919 NextMarker: nextMarker,
920 KeyCount: resp.KeyCount,
921 ListResp: s3.ListResp{
922 IsTruncated: resp.IsTruncated,
924 Prefix: params.prefix,
925 Delimiter: params.delimiter,
926 Marker: params.marker,
927 MaxKeys: params.maxKeys,
928 Contents: resp.Contents,
933 w.Header().Set("Content-Type", "application/xml")
934 io.WriteString(w, xml.Header)
935 if err := xml.NewEncoder(w).Encode(respV1orV2); err != nil {
936 ctxlog.FromContext(r.Context()).WithError(err).Error("error writing xml response")