// Copyright (C) The Arvados Authors. All rights reserved.
// SPDX-License-Identifier: AGPL-3.0

	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/ctxlog"

	s3SignAlgorithm = "AWS4-HMAC-SHA256"
	s3MaxClockSkew  = 5 * time.Minute

type commonPrefix struct {
	Prefix string
}

type listV1Resp struct {
	XMLName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
	// If we use a []string here, xml marshals an empty tag when
	// CommonPrefixes is nil, which confuses some clients. Fix by
	// using this nested struct instead.
	CommonPrefixes []commonPrefix
	// Similarly, we need omitempty here, because an empty
	// tag confuses some clients (e.g.,
	// github.com/aws/aws-sdk-net never terminates its
	// paging loop).
	NextMarker string `xml:"NextMarker,omitempty"`
	// ListObjectsV2 has a KeyCount response field.
	KeyCount int
}

type listV2Resp struct {
	XMLName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
	CommonPrefixes []commonPrefix
	EncodingType string `xml:",omitempty"`
	ContinuationToken string `xml:",omitempty"`
	NextContinuationToken string `xml:",omitempty"`
	StartAfter string `xml:",omitempty"`
	// The following fields are not populated, but are here in
	// case clients rely on the keys being present in xml
	// responses.

func hmacstring(msg string, key []byte) []byte {
	h := hmac.New(sha256.New, key)
	io.WriteString(h, msg)
	return h.Sum(nil)
}

func hashdigest(h hash.Hash, payload string) string {
	io.WriteString(h, payload)
	return fmt.Sprintf("%x", h.Sum(nil))
}
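
// For example, hashdigest(sha256.New(), "") yields the SHA-256 of the empty
// string, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
// which is also the X-Amz-Content-Sha256 value S3 clients send for an empty
// payload.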

// Signing key for given secret key and request attrs.
func s3signatureKey(key, datestamp, regionName, serviceName string) []byte {
	return hmacstring("aws4_request",
		hmacstring(serviceName,
			hmacstring(regionName,
				hmacstring(datestamp, []byte("AWS4"+key)))))
}
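
// exampleV4Signature is an illustrative sketch only (it is not called
// anywhere): it shows how the derived key is combined with a string-to-sign
// to produce an AWS V4 signature, using made-up scope values. s3signature
// below is the real code path.
func exampleV4Signature(secret, stringToSign string) string {
	// Derive the per-date/region/service key, then HMAC the string-to-sign.
	key := s3signatureKey(secret, "20240102", "us-east-1", "s3")
	return hashdigest(hmac.New(sha256.New, key), stringToSign)
}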

// Canonical query string for S3 V4 signature: sorted keys, spaces
// escaped as %20 instead of +, key=value pairs joined with &.
func s3querystring(u *url.URL) string {
	keys := make([]string, 0, len(u.Query()))
	values := make(map[string]string, len(u.Query()))
	for k, vs := range u.Query() {
		k = strings.Replace(url.QueryEscape(k), "+", "%20", -1)
		keys = append(keys, k)
		for _, v := range vs {
			v = strings.Replace(url.QueryEscape(v), "+", "%20", -1)
			if values[k] != "" {
				values[k] += "&"
			}
			values[k] += k + "=" + v
		}
	}
	sort.Strings(keys)
	for i, k := range keys {
		keys[i] = values[k]
	}
	return strings.Join(keys, "&")
}
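
// A worked example with a hypothetical URL (not from the AWS docs):
//
//	u, _ := url.Parse("https://example/bucket?b=two+words&a=1")
//	s3querystring(u) // "a=1&b=two%20words"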

var reMultipleSlashChars = regexp.MustCompile(`//+`)

func s3stringToSign(alg, scope, signedHeaders string, r *http.Request) (string, error) {
	timefmt, timestr := "20060102T150405Z", r.Header.Get("X-Amz-Date")
	if timestr == "" {
		timefmt, timestr = time.RFC1123, r.Header.Get("Date")
	}
	t, err := time.Parse(timefmt, timestr)
	if err != nil {
		return "", fmt.Errorf("invalid timestamp %q: %s", timestr, err)
	}
	if skew := time.Now().Sub(t); skew < -s3MaxClockSkew || skew > s3MaxClockSkew {
		return "", errors.New("exceeded max clock skew")
	}
	var canonicalHeaders string
	for _, h := range strings.Split(signedHeaders, ";") {
		if h == "host" {
			canonicalHeaders += h + ":" + r.Host + "\n"
		} else {
			canonicalHeaders += h + ":" + r.Header.Get(h) + "\n"
		}
	}
	normalizedPath := normalizePath(r.URL.Path)
	ctxlog.FromContext(r.Context()).Debugf("normalizedPath %q", normalizedPath)
	canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", r.Method, normalizedPath, s3querystring(r.URL), canonicalHeaders, signedHeaders, r.Header.Get("X-Amz-Content-Sha256"))
	ctxlog.FromContext(r.Context()).Debugf("s3stringToSign: canonicalRequest %s", canonicalRequest)
	return fmt.Sprintf("%s\n%s\n%s\n%s", alg, r.Header.Get("X-Amz-Date"), scope, hashdigest(sha256.New(), canonicalRequest)), nil
}

func normalizePath(s string) string {
	// (url.URL).EscapedPath() would be incorrect here. AWS
	// documentation specifies the URL path should be normalized
	// according to RFC 3986, i.e., unescaping ALPHA / DIGIT / "-"
	// / "." / "_" / "~". The implication is that everything other
	// than those chars (and "/") _must_ be percent-encoded --
	// even chars like ";" and "," that are not normally
	// percent-encoded in paths.
	out := ""
	for _, c := range []byte(reMultipleSlashChars.ReplaceAllString(s, "/")) {
		if (c >= 'a' && c <= 'z') ||
			(c >= 'A' && c <= 'Z') ||
			(c >= '0' && c <= '9') ||
			c == '-' ||
			c == '.' ||
			c == '_' ||
			c == '~' ||
			c == '/' {
			out += string(c)
		} else {
			out += fmt.Sprintf("%%%02X", c)
		}
	}
	return out
}
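
// For example, with a hypothetical input:
//
//	normalizePath("/bucket//a b,c") => "/bucket/a%20b%2Cc"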

func s3signature(secretKey, scope, signedHeaders, stringToSign string) (string, error) {
	// scope is {datestamp}/{region}/{service}/aws4_request
	drs := strings.Split(scope, "/")
	if len(drs) != 4 {
		return "", fmt.Errorf("invalid scope %q", scope)
	}
	key := s3signatureKey(secretKey, drs[0], drs[1], drs[2])
	return hashdigest(hmac.New(sha256.New, key), stringToSign), nil
}

var v2tokenUnderscore = regexp.MustCompile(`^v2_[a-z0-9]{5}-gj3su-[a-z0-9]{15}_`)

func unescapeKey(key string) string {
	if v2tokenUnderscore.MatchString(key) {
		// Entire Arvados token, with "/" replaced by "_" to
		// avoid colliding with the Authorization header
		// format.
		return strings.Replace(key, "_", "/", -1)
	} else if s, err := url.PathUnescape(key); err == nil {
		return s
	} else {
		return key
	}
}
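
// Hypothetical examples of the access key forms accepted here:
//
//	unescapeKey("v2_zzzzz-gj3su-012345678901234_secret")     => "v2/zzzzz-gj3su-012345678901234/secret"
//	unescapeKey("v2%2Fzzzzz-gj3su-012345678901234%2Fsecret") => "v2/zzzzz-gj3su-012345678901234/secret"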

// checks3signature verifies the given S3 V4 signature and returns the
// Arvados token that corresponds to the given accessKey. An error is
// returned if accessKey is not a valid token UUID or the signature
// does not match.
func (h *handler) checks3signature(r *http.Request) (string, error) {
	var key, scope, signedHeaders, signature string
	authstring := strings.TrimPrefix(r.Header.Get("Authorization"), s3SignAlgorithm+" ")
	for _, cmpt := range strings.Split(authstring, ",") {
		cmpt = strings.TrimSpace(cmpt)
		split := strings.SplitN(cmpt, "=", 2)
		switch {
		case len(split) != 2:
		case split[0] == "Credential":
			keyandscope := strings.SplitN(split[1], "/", 2)
			if len(keyandscope) == 2 {
				key, scope = keyandscope[0], keyandscope[1]
			}
		case split[0] == "SignedHeaders":
			signedHeaders = split[1]
		case split[0] == "Signature":
			signature = split[1]
		}
	}

	client := (&arvados.Client{
		APIHost:  h.Cluster.Services.Controller.ExternalURL.Host,
		Insecure: h.Cluster.TLS.Insecure,
	}).WithRequestID(r.Header.Get("X-Request-Id"))
	var aca arvados.APIClientAuthorization
	var secret string
	var err error
	if len(key) == 27 && key[5:12] == "-gj3su-" {
		// Access key is the UUID of an Arvados token, secret
		// key is the secret part.
		ctx := arvados.ContextWithAuthorization(r.Context(), "Bearer "+h.Cluster.SystemRootToken)
		err = client.RequestAndDecodeContext(ctx, &aca, "GET", "arvados/v1/api_client_authorizations/"+key, nil, nil)
		secret = aca.APIToken
	} else {
		// Access key and secret key are both an entire
		// Arvados token or OIDC access token.
		ctx := arvados.ContextWithAuthorization(r.Context(), "Bearer "+unescapeKey(key))
		err = client.RequestAndDecodeContext(ctx, &aca, "GET", "arvados/v1/api_client_authorizations/current", nil, nil)
		secret = key
	}
	if err != nil {
		ctxlog.FromContext(r.Context()).WithError(err).WithField("UUID", key).Info("token lookup failed")
		return "", errors.New("invalid access key")
	}
	stringToSign, err := s3stringToSign(s3SignAlgorithm, scope, signedHeaders, r)
	if err != nil {
		return "", err
	}
	expect, err := s3signature(secret, scope, signedHeaders, stringToSign)
	if err != nil {
		return "", err
	} else if expect != signature {
		return "", fmt.Errorf("signature does not match (scope %q signedHeaders %q stringToSign %q)", scope, signedHeaders, stringToSign)
	}
	return aca.TokenV2(), nil
}
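
// For reference, the Authorization header parsed above looks like this on one
// line (values are hypothetical):
//
//	AWS4-HMAC-SHA256 Credential=zzzzz-gj3su-0123456789abcde/20240102/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=0123abcd...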

func s3ErrorResponse(w http.ResponseWriter, s3code string, message string, resource string, code int) {
	w.Header().Set("Content-Type", "application/xml")
	w.Header().Set("X-Content-Type-Options", "nosniff")
	w.WriteHeader(code)
	var errstruct struct {
		Code      string
		Message   string
		Resource  string
		RequestId string
	}
	errstruct.Code = s3code
	errstruct.Message = message
	errstruct.Resource = resource
	errstruct.RequestId = ""
	enc := xml.NewEncoder(w)
	fmt.Fprint(w, xml.Header)
	enc.EncodeElement(errstruct, xml.StartElement{Name: xml.Name{Local: "Error"}})
}
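
// The resulting body looks roughly like this (resource value hypothetical):
//
//	<?xml version="1.0" encoding="UTF-8"?>
//	<Error><Code>NoSuchKey</Code><Message>The specified key does not exist.</Message><Resource>/bucket/key</Resource><RequestId></RequestId></Error>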

var NoSuchKey = "NoSuchKey"
var NoSuchBucket = "NoSuchBucket"
var InvalidArgument = "InvalidArgument"
var InternalError = "InternalError"
var UnauthorizedAccess = "UnauthorizedAccess"
var InvalidRequest = "InvalidRequest"
var SignatureDoesNotMatch = "SignatureDoesNotMatch"

var reRawQueryIndicatesAPI = regexp.MustCompile(`^[a-z]+(&|$)`)
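
// For example, raw queries such as "acl", "website", or "tagging&versionId=..."
// match (they name a bucket/object API call), while listing parameters such as
// "list-type=2&prefix=foo" do not.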

// serveS3 handles r and returns true if r is a request from an S3
// client, otherwise it returns false.
func (h *handler) serveS3(w http.ResponseWriter, r *http.Request) bool {
	var token string
	if auth := r.Header.Get("Authorization"); strings.HasPrefix(auth, "AWS ") {
		split := strings.SplitN(auth[4:], ":", 2)
		if len(split) < 2 {
			s3ErrorResponse(w, InvalidRequest, "malformed Authorization header", r.URL.Path, http.StatusUnauthorized)
			return true
		}
		token = unescapeKey(split[0])
	} else if strings.HasPrefix(auth, s3SignAlgorithm+" ") {
		t, err := h.checks3signature(r)
		if err != nil {
			s3ErrorResponse(w, SignatureDoesNotMatch, "signature verification failed: "+err.Error(), r.URL.Path, http.StatusForbidden)
			return true
		}
		token = t
	} else {
		return false
	}

	fs, sess, tokenUser, err := h.Cache.GetSession(token)
	if err != nil {
		s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
		return true
	}
	if writeMethod[r.Method] {
		// Create a FileSystem for this request, to avoid
		// exposing incomplete write operations to concurrent
		// readers.
		client := sess.client.WithRequestID(r.Header.Get("X-Request-Id"))
		fs = client.SiteFileSystem(sess.keepclient)
		fs.ForwardSlashNameSubstitution(h.Cluster.Collections.ForwardSlashNameSubstitution)
	}

	var objectNameGiven bool
	var bucketName string
	if id := arvados.CollectionIDFromDNSName(r.Host); id != "" {
		fspath += "/" + id
		bucketName = id
		objectNameGiven = strings.Count(strings.TrimSuffix(r.URL.Path, "/"), "/") > 0
	} else {
		bucketName = strings.SplitN(strings.TrimPrefix(r.URL.Path, "/"), "/", 2)[0]
		objectNameGiven = strings.Count(strings.TrimSuffix(r.URL.Path, "/"), "/") > 1
	}
	fspath += reMultipleSlashChars.ReplaceAllString(r.URL.Path, "/")

	switch {
	case r.Method == http.MethodGet && !objectNameGiven:
		// Path is "/{uuid}" or "/{uuid}/", has no object name
		if _, ok := r.URL.Query()["versioning"]; ok {
			// GetBucketVersioning
			w.Header().Set("Content-Type", "application/xml")
			io.WriteString(w, xml.Header)
			fmt.Fprintln(w, `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`)
		} else if _, ok = r.URL.Query()["location"]; ok {
			// GetBucketLocation
			w.Header().Set("Content-Type", "application/xml")
			io.WriteString(w, xml.Header)
			fmt.Fprintln(w, `<LocationConstraint><LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">`+
				h.Cluster.ClusterID+
				`</LocationConstraint></LocationConstraint>`)
		} else if reRawQueryIndicatesAPI.MatchString(r.URL.RawQuery) {
			// GetBucketWebsite ("GET /bucketid/?website"), GetBucketTagging, etc.
			s3ErrorResponse(w, InvalidRequest, "API not supported", r.URL.Path+"?"+r.URL.RawQuery, http.StatusBadRequest)
		} else {
			// ListObjects
			h.s3list(bucketName, w, r, fs)
		}
		return true
	case r.Method == http.MethodGet || r.Method == http.MethodHead:
		if reRawQueryIndicatesAPI.MatchString(r.URL.RawQuery) {
			// GetObjectRetention ("GET /bucketid/objectid?retention&versionID=..."), etc.
			s3ErrorResponse(w, InvalidRequest, "API not supported", r.URL.Path+"?"+r.URL.RawQuery, http.StatusBadRequest)
			return true
		}
		fi, err := fs.Stat(fspath)
		if r.Method == "HEAD" && !objectNameGiven {
			if err == nil && fi.IsDir() {
				err = setFileInfoHeaders(w.Header(), fs, fspath)
				if err != nil {
					s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusBadGateway)
					return true
				}
				w.WriteHeader(http.StatusOK)
			} else if os.IsNotExist(err) {
				s3ErrorResponse(w, NoSuchBucket, "The specified bucket does not exist.", r.URL.Path, http.StatusNotFound)
			} else {
				s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusBadGateway)
			}
			return true
		}
		if err == nil && fi.IsDir() && objectNameGiven && strings.HasSuffix(fspath, "/") && h.Cluster.Collections.S3FolderObjects {
			err = setFileInfoHeaders(w.Header(), fs, fspath)
			if err != nil {
				s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusBadGateway)
				return true
			}
			w.Header().Set("Content-Type", "application/x-directory")
			w.WriteHeader(http.StatusOK)
			return true
		}
		if os.IsNotExist(err) ||
			(err != nil && err.Error() == "not a directory") ||
			(fi != nil && fi.IsDir()) {
			s3ErrorResponse(w, NoSuchKey, "The specified key does not exist.", r.URL.Path, http.StatusNotFound)
			return true
		}

		if !h.userPermittedToUploadOrDownload(r.Method, tokenUser) {
			http.Error(w, "Not permitted", http.StatusForbidden)
			return true
		}
		h.logUploadOrDownload(r, sess.arvadosclient, fs, fspath, nil, tokenUser)

		// shallow copy r, and change URL path
		r := *r
		r.URL.Path = fspath
		err = setFileInfoHeaders(w.Header(), fs, fspath)
		if err != nil {
			s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusBadGateway)
			return true
		}
		http.FileServer(fs).ServeHTTP(w, &r)
		return true
	case r.Method == http.MethodPut:
		if reRawQueryIndicatesAPI.MatchString(r.URL.RawQuery) {
			// PutObjectAcl ("PUT /bucketid/objectid?acl&versionID=..."), etc.
			s3ErrorResponse(w, InvalidRequest, "API not supported", r.URL.Path+"?"+r.URL.RawQuery, http.StatusBadRequest)
			return true
		}
		if !objectNameGiven {
			s3ErrorResponse(w, InvalidArgument, "Missing object name in PUT request.", r.URL.Path, http.StatusBadRequest)
			return true
		}

		if strings.HasSuffix(fspath, "/") {
			if !h.Cluster.Collections.S3FolderObjects {
				s3ErrorResponse(w, InvalidArgument, "invalid object name: trailing slash", r.URL.Path, http.StatusBadRequest)
				return true
			}
			n, err := r.Body.Read(make([]byte, 1))
			if err != nil && err != io.EOF {
				s3ErrorResponse(w, InternalError, fmt.Sprintf("error reading request body: %s", err), r.URL.Path, http.StatusInternalServerError)
				return true
			} else if n > 0 {
				s3ErrorResponse(w, InvalidArgument, "cannot create object with trailing '/' char unless content is empty", r.URL.Path, http.StatusBadRequest)
				return true
			} else if strings.SplitN(r.Header.Get("Content-Type"), ";", 2)[0] != "application/x-directory" {
				s3ErrorResponse(w, InvalidArgument, "cannot create object with trailing '/' char unless Content-Type is 'application/x-directory'", r.URL.Path, http.StatusBadRequest)
				return true
			}
			// Given PUT "foo/bar/", we'll use "foo/bar/."
			// in the "ensure parents exist" block below,
			// and then we'll be done.
			fspath += "."
		}

		fi, err := fs.Stat(fspath)
		if err != nil && err.Error() == "not a directory" {
			// requested foo/bar, but foo is a file
			s3ErrorResponse(w, InvalidArgument, "object name conflicts with existing object", r.URL.Path, http.StatusBadRequest)
			return true
		}
		if strings.HasSuffix(r.URL.Path, "/") && err == nil && !fi.IsDir() {
			// requested foo/bar/, but foo/bar is a file
			s3ErrorResponse(w, InvalidArgument, "object name conflicts with existing object", r.URL.Path, http.StatusBadRequest)
			return true
		}
		// create missing parent/intermediate directories, if any
		for i, c := range fspath {
			if i > 0 && c == '/' {
				dir := fspath[:i]
				if strings.HasSuffix(dir, "/") {
					err = errors.New("invalid object name (consecutive '/' chars)")
					s3ErrorResponse(w, InvalidArgument, err.Error(), r.URL.Path, http.StatusBadRequest)
					return true
				}
				err = fs.Mkdir(dir, 0755)
				if errors.Is(err, arvados.ErrInvalidArgument) || errors.Is(err, arvados.ErrInvalidOperation) {
					// Cannot create a directory
					err = fmt.Errorf("mkdir %q failed: %w", dir, err)
					s3ErrorResponse(w, InvalidArgument, err.Error(), r.URL.Path, http.StatusBadRequest)
					return true
				} else if err != nil && !os.IsExist(err) {
					err = fmt.Errorf("mkdir %q failed: %w", dir, err)
					s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
					return true
				}
			}
		}

		f, err := fs.OpenFile(fspath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)
		if os.IsNotExist(err) {
			f, err = fs.OpenFile(fspath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)
		}
		if err != nil {
			err = fmt.Errorf("open %q failed: %w", r.URL.Path, err)
			s3ErrorResponse(w, InvalidArgument, err.Error(), r.URL.Path, http.StatusBadRequest)
			return true
		}

		if !h.userPermittedToUploadOrDownload(r.Method, tokenUser) {
			http.Error(w, "Not permitted", http.StatusForbidden)
			return true
		}
		h.logUploadOrDownload(r, sess.arvadosclient, fs, fspath, nil, tokenUser)

		_, err = io.Copy(f, r.Body)
		if err != nil {
			err = fmt.Errorf("write to %q failed: %w", r.URL.Path, err)
			s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusBadGateway)
			return true
		}
		err = f.Close()
		if err != nil {
			err = fmt.Errorf("write to %q failed: close: %w", r.URL.Path, err)
			s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusBadGateway)
			return true
		}
		err = h.syncCollection(fs, readfs, fspath)
		if err != nil {
			err = fmt.Errorf("sync failed: %w", err)
			s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
			return true
		}
		w.WriteHeader(http.StatusOK)
		return true
	case r.Method == http.MethodDelete:
		if reRawQueryIndicatesAPI.MatchString(r.URL.RawQuery) {
			// DeleteObjectTagging ("DELETE /bucketid/objectid?tagging&versionID=..."), etc.
			s3ErrorResponse(w, InvalidRequest, "API not supported", r.URL.Path+"?"+r.URL.RawQuery, http.StatusBadRequest)
			return true
		}
		if !objectNameGiven || r.URL.Path == "/" {
			s3ErrorResponse(w, InvalidArgument, "missing object name in DELETE request", r.URL.Path, http.StatusBadRequest)
			return true
		}
		if strings.HasSuffix(fspath, "/") {
			fspath = strings.TrimSuffix(fspath, "/")
			fi, err := fs.Stat(fspath)
			if os.IsNotExist(err) {
				w.WriteHeader(http.StatusNoContent)
				return true
			} else if err != nil {
				s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
				return true
			} else if !fi.IsDir() {
				// if "foo" exists and is a file, then
				// "foo/" doesn't exist, so we say
				// delete was successful.
				w.WriteHeader(http.StatusNoContent)
				return true
			}
		} else if fi, err := fs.Stat(fspath); err == nil && fi.IsDir() {
			// if "foo" is a dir, it is visible via S3
			// only as "foo/", not "foo" -- so we leave
			// the dir alone and return 204 to indicate
			// that "foo" does not exist.
			w.WriteHeader(http.StatusNoContent)
			return true
		}
		err = fs.Remove(fspath)
		if os.IsNotExist(err) {
			w.WriteHeader(http.StatusNoContent)
			return true
		}
		if err != nil {
			err = fmt.Errorf("rm failed: %w", err)
			s3ErrorResponse(w, InvalidArgument, err.Error(), r.URL.Path, http.StatusBadRequest)
			return true
		}
		err = h.syncCollection(fs, readfs, fspath)
		if err != nil {
			err = fmt.Errorf("sync failed: %w", err)
			s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
			return true
		}
		w.WriteHeader(http.StatusNoContent)
		return true
	default:
		s3ErrorResponse(w, InvalidRequest, "method not allowed", r.URL.Path, http.StatusMethodNotAllowed)
		return true
	}
}

// Save modifications to the indicated collection in srcfs, then (if
// successful) ensure they are also reflected in dstfs.
func (h *handler) syncCollection(srcfs, dstfs arvados.CustomFileSystem, path string) error {
	coll, _ := h.determineCollection(srcfs, path)
	if coll == nil || coll.UUID == "" {
		return errors.New("could not determine collection to sync")
	}
	d, err := srcfs.OpenFile("by_id/"+coll.UUID, os.O_RDWR, 0777)
	snap, err := d.Snapshot()
	if err != nil {
		return err
	}
	dstd, err := dstfs.OpenFile("by_id/"+coll.UUID, os.O_RDWR, 0777)
	if err != nil {
		return err
	}
	return dstd.Splice(snap)
}

func setFileInfoHeaders(header http.Header, fs arvados.CustomFileSystem, path string) error {
	maybeEncode := func(s string) string {
		for _, c := range s {
			if c > '\u007f' || c < ' ' {
				return mime.BEncoding.Encode("UTF-8", s)
			}
		}
		return s
	}
	path = strings.TrimSuffix(path, "/")
	var props map[string]interface{}
	fi, err := fs.Stat(path)
	if err != nil {
		return err
	}
	switch src := fi.Sys().(type) {
	case *arvados.Collection:
		props = src.Properties
	case *arvados.Group:
		props = src.Properties
	default:
		if err, ok := src.(error); ok {
			return err
		}
	}
	cut := strings.LastIndexByte(path, '/')
	for k, v := range props {
		if !validMIMEHeaderKey(k) {
			continue
		}
		k = "x-amz-meta-" + k
		if s, ok := v.(string); ok {
			header.Set(k, maybeEncode(s))
		} else if j, err := json.Marshal(v); err == nil {
			header.Set(k, maybeEncode(string(j)))
		}
	}
	return nil
}
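
// For example, a collection property {"color": "blue"} is exposed as a
// response header "x-amz-meta-color: blue" (assuming "color" passes
// validMIMEHeaderKey); non-string property values are JSON-encoded first.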

func validMIMEHeaderKey(k string) bool {
	check := "z-" + k
	return check != textproto.CanonicalMIMEHeaderKey(check)
}

// Call fn on the given path (directory) and its contents, in
// lexicographic order.
//
// If isRoot==true and path is not a directory, return nil.
//
// If fn returns filepath.SkipDir when called on a directory, don't
// descend into that directory.
func walkFS(fs arvados.CustomFileSystem, path string, isRoot bool, fn func(path string, fi os.FileInfo) error) error {
	if isRoot {
		fi, err := fs.Stat(path)
		if os.IsNotExist(err) || (err == nil && !fi.IsDir()) {
			return nil
		} else if err != nil {
			return err
		}
		err = fn(path, fi)
		if err == filepath.SkipDir {
			return nil
		} else if err != nil {
			return err
		}
	}
	f, err := fs.Open(path)
	if os.IsNotExist(err) && isRoot {
		return nil
	} else if err != nil {
		return fmt.Errorf("open %q: %w", path, err)
	}
	defer f.Close()
	fis, err := f.Readdir(-1)
	if err != nil {
		return err
	}
	sort.Slice(fis, func(i, j int) bool { return fis[i].Name() < fis[j].Name() })
	for _, fi := range fis {
		err = fn(path+"/"+fi.Name(), fi)
		if err == filepath.SkipDir {
			continue
		} else if err != nil {
			return err
		}
		if fi.IsDir() {
			err = walkFS(fs, path+"/"+fi.Name(), false, fn)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

var errDone = errors.New("done")

func (h *handler) s3list(bucket string, w http.ResponseWriter, r *http.Request, fs arvados.CustomFileSystem) {
	var params struct {
		v2                bool
		delimiter         string
		maxKeys           int
		prefix            string
		marker            string // decoded continuationToken (v2) or provided by client (v1)
		startAfter        string // v2
		continuationToken string // v2
		encodingTypeURL   bool   // v2
	}
	params.delimiter = r.FormValue("delimiter")
	if mk, _ := strconv.ParseInt(r.FormValue("max-keys"), 10, 64); mk > 0 && mk < s3MaxKeys {
		params.maxKeys = int(mk)
	} else {
		params.maxKeys = s3MaxKeys
	}
	params.prefix = r.FormValue("prefix")
	switch r.FormValue("list-type") {
	case "":
	case "2":
		params.v2 = true
	default:
		http.Error(w, "invalid list-type parameter", http.StatusBadRequest)
		return
	}
	if params.v2 {
		params.continuationToken = r.FormValue("continuation-token")
		marker, err := base64.StdEncoding.DecodeString(params.continuationToken)
		if err != nil {
			http.Error(w, "invalid continuation token", http.StatusBadRequest)
			return
		}
		// marker and start-after perform the same function,
		// but we keep them separate so we can repeat them
		// back to the client in the response.
		params.marker = string(marker)
		params.startAfter = r.FormValue("start-after")
		switch r.FormValue("encoding-type") {
		case "":
		case "url":
			params.encodingTypeURL = true
		default:
			http.Error(w, "invalid encoding-type parameter", http.StatusBadRequest)
			return
		}
	} else {
		// marker is functionally equivalent to start-after.
		params.marker = r.FormValue("marker")
	}

	// startAfter is params.marker or params.startAfter, whichever
	// is greater.
	startAfter := params.startAfter
	if startAfter < params.marker {
		startAfter = params.marker
	}

	bucketdir := "by_id/" + bucket
	// walkpath is the directory (relative to bucketdir) we need
	// to walk: the innermost directory that is guaranteed to
	// contain all paths that have the requested prefix. Examples:
	// prefix "foo/bar"  => walkpath "foo"
	// prefix "foo/bar/" => walkpath "foo/bar"
	// prefix "foo"      => walkpath ""
	// prefix ""         => walkpath ""
	walkpath := params.prefix
	if cut := strings.LastIndex(walkpath, "/"); cut >= 0 {
		walkpath = walkpath[:cut]
	} else {
		walkpath = ""
	}

	resp := listV2Resp{
		Prefix:            params.prefix,
		Delimiter:         params.delimiter,
		MaxKeys:           params.maxKeys,
		ContinuationToken: r.FormValue("continuation-token"),
		StartAfter:        params.startAfter,
	}

	// nextMarker will be the last path we add to either
	// resp.Contents or commonPrefixes. It will be included in
	// the response as NextMarker or NextContinuationToken if
	// the response is truncated.
	var nextMarker string

	commonPrefixes := map[string]bool{}

	err := walkFS(fs, strings.TrimSuffix(bucketdir+"/"+walkpath, "/"), true, func(path string, fi os.FileInfo) error {
		if path == bucketdir {
			return nil
		}
		path = path[len(bucketdir)+1:]
		filesize := fi.Size()
		if fi.IsDir() {
			filesize = 0
		}
		if strings.HasPrefix(params.prefix, path) && params.prefix != path {
			// Descend into subtree until we reach desired prefix
			return nil
		} else if path < params.prefix {
			// Not an ancestor or descendant of desired
			// prefix, therefore none of its descendants
			// can be either -- skip
			return filepath.SkipDir
		} else if path > params.prefix && !strings.HasPrefix(path, params.prefix) {
			// We must have traversed everything under
			// the desired prefix
			return errDone
		} else if path == startAfter {
			// Skip startAfter itself, just descend into
			// subtree
			return nil
		} else if strings.HasPrefix(startAfter, path) {
			// Descend into subtree in case it contains
			// something after startAfter
			return nil
		} else if path < startAfter {
			// Skip ahead until we reach startAfter
			return filepath.SkipDir
		}
		if fi.IsDir() && !h.Cluster.Collections.S3FolderObjects {
			// Note we don't add anything to
			// commonPrefixes here even if delimiter is
			// "/". We descend into the directory, and
			// return a commonPrefix only if we end up
			// finding a regular file inside it.
			return nil
		}
		if params.delimiter != "" {
			idx := strings.Index(path[len(params.prefix):], params.delimiter)
			if idx >= 0 {
				// with prefix "foobar" and delimiter
				// "z", when we hit "foobar/baz", we
				// add "/baz" to commonPrefixes and
				// stop descending.
				prefix := path[:len(params.prefix)+idx+1]
				if prefix == startAfter {
					return nil
				} else if prefix < startAfter && !strings.HasPrefix(startAfter, prefix) {
					return nil
				} else if full {
					resp.IsTruncated = true
					return errDone
				}
				commonPrefixes[prefix] = true
				nextMarker = prefix
				full = len(resp.Contents)+len(commonPrefixes) >= params.maxKeys
				return filepath.SkipDir
			}
		}
		if full {
			resp.IsTruncated = true
			return errDone
		}
		resp.Contents = append(resp.Contents, s3Key{
			Key:          path,
			LastModified: fi.ModTime().UTC().Format("2006-01-02T15:04:05.999") + "Z",
			Size:         filesize,
		})
		nextMarker = path
		full = len(resp.Contents)+len(commonPrefixes) >= params.maxKeys
		return nil
	})
	if err != nil && err != errDone {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if params.delimiter == "" && !params.v2 || !resp.IsTruncated {
		nextMarker = ""
	}
	if params.delimiter != "" {
		resp.CommonPrefixes = make([]commonPrefix, 0, len(commonPrefixes))
		for prefix := range commonPrefixes {
			resp.CommonPrefixes = append(resp.CommonPrefixes, commonPrefix{prefix})
		}
		sort.Slice(resp.CommonPrefixes, func(i, j int) bool { return resp.CommonPrefixes[i].Prefix < resp.CommonPrefixes[j].Prefix })
	}
	resp.KeyCount = len(resp.Contents)
	var respV1orV2 interface{}

	if params.encodingTypeURL {
		// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
		// "If you specify the encoding-type request
		// parameter, Amazon S3 includes this element in the
		// response, and returns encoded key name values in
		// the following response elements:
		//
		// Delimiter, Prefix, Key, and StartAfter.
		//
		// Type: String
		//
		// Valid Values: url"
		//
		// This is somewhat vague but in practice it appears
		// to mean x-www-form-urlencoded as in RFC1866 8.2.1
		// para 1 (encode space as "+") rather than straight
		// percent-encoding as in RFC1738 2.2. Presumably,
		// the intent is to allow the client to decode XML and
		// then paste the strings directly into another URI
		// query or POST form like "https://host/path?foo=" +
		// foo + "&bar=" + bar.
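		//
		// For example, a key "a b" is returned here as "a+b"
		// (url.QueryEscape), not "a%20b".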
		resp.EncodingType = "url"
		resp.Delimiter = url.QueryEscape(resp.Delimiter)
		resp.Prefix = url.QueryEscape(resp.Prefix)
		resp.StartAfter = url.QueryEscape(resp.StartAfter)
		for i, ent := range resp.Contents {
			ent.Key = url.QueryEscape(ent.Key)
			resp.Contents[i] = ent
		}
		for i, ent := range resp.CommonPrefixes {
			ent.Prefix = url.QueryEscape(ent.Prefix)
			resp.CommonPrefixes[i] = ent
		}
	}

	resp.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextMarker))
	respV1orV2 = listV1Resp{
		CommonPrefixes: resp.CommonPrefixes,
		NextMarker:     nextMarker,
		KeyCount:       resp.KeyCount,
		IsTruncated:    resp.IsTruncated,
		Prefix:         params.prefix,
		Delimiter:      params.delimiter,
		Marker:         params.marker,
		MaxKeys:        params.maxKeys,
		Contents:       resp.Contents,
	}

	w.Header().Set("Content-Type", "application/xml")
	io.WriteString(w, xml.Header)
	if err := xml.NewEncoder(w).Encode(respV1orV2); err != nil {
		ctxlog.FromContext(r.Context()).WithError(err).Error("error writing xml response")
	}