"encoding/xml"
"errors"
"fmt"
+ "hash"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
+ "regexp"
"sort"
"strconv"
"strings"
return h.Sum(nil)
}
// hashdigest feeds payload through the given hash and returns the
// resulting digest as a lowercase hex string.
func hashdigest(h hash.Hash, payload string) string {
	// hash.Hash's Write is documented to never return an error.
	h.Write([]byte(payload))
	return fmt.Sprintf("%x", h.Sum(nil))
}
+
// Signing key for given secret key and request attrs.
func s3signatureKey(key, datestamp, regionName, serviceName string) []byte {
return hmacstring("aws4_request",
return strings.Join(keys, "&")
}
-func s3signature(alg, secretKey, scope, signedHeaders string, r *http.Request) (string, error) {
// reMultipleSlashChars matches a run of two or more consecutive slash
// characters; it is used to collapse such runs to a single "/" when
// normalizing request paths.
var reMultipleSlashChars = regexp.MustCompile(`//+`)
+
+func s3stringToSign(alg, scope, signedHeaders string, r *http.Request) (string, error) {
timefmt, timestr := "20060102T150405Z", r.Header.Get("X-Amz-Date")
if timestr == "" {
timefmt, timestr = time.RFC1123, r.Header.Get("Date")
var canonicalHeaders string
for _, h := range strings.Split(signedHeaders, ";") {
if h == "host" {
- canonicalHeaders += h + ":" + r.URL.Host + "\n"
+ canonicalHeaders += h + ":" + r.Host + "\n"
} else {
canonicalHeaders += h + ":" + r.Header.Get(h) + "\n"
}
}
- crhash := sha256.New()
- fmt.Fprintf(crhash, "%s\n%s\n%s\n%s\n%s\n%s", r.Method, r.URL.EscapedPath(), s3querystring(r.URL), canonicalHeaders, signedHeaders, r.Header.Get("X-Amz-Content-Sha256"))
- crdigest := fmt.Sprintf("%x", crhash.Sum(nil))
-
- payload := fmt.Sprintf("%s\n%s\n%s\n%s", alg, r.Header.Get("X-Amz-Date"), scope, crdigest)
+ normalizedURL := *r.URL
+ normalizedURL.RawPath = ""
+ normalizedURL.Path = reMultipleSlashChars.ReplaceAllString(normalizedURL.Path, "/")
+ canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", r.Method, normalizedURL.EscapedPath(), s3querystring(r.URL), canonicalHeaders, signedHeaders, r.Header.Get("X-Amz-Content-Sha256"))
+ ctxlog.FromContext(r.Context()).Debugf("s3stringToSign: canonicalRequest %s", canonicalRequest)
+ return fmt.Sprintf("%s\n%s\n%s\n%s", alg, r.Header.Get("X-Amz-Date"), scope, hashdigest(sha256.New(), canonicalRequest)), nil
+}
+func s3signature(secretKey, scope, signedHeaders, stringToSign string) (string, error) {
// scope is {datestamp}/{region}/{service}/aws4_request
drs := strings.Split(scope, "/")
if len(drs) != 4 {
return "", fmt.Errorf("invalid scope %q", scope)
}
-
key := s3signatureKey(secretKey, drs[0], drs[1], drs[2])
- h := hmac.New(sha256.New, key)
- h.Write([]byte(payload))
- return fmt.Sprintf("%x", h.Sum(nil)), nil
+ return hashdigest(hmac.New(sha256.New, key), stringToSign), nil
}
// checks3signature verifies the given S3 V4 signature and returns the
ctxlog.FromContext(r.Context()).WithError(err).WithField("UUID", key).Info("token lookup failed")
return "", errors.New("invalid access key")
}
- expect, err := s3signature(s3SignAlgorithm, secret, scope, signedHeaders, r)
+ stringToSign, err := s3stringToSign(s3SignAlgorithm, scope, signedHeaders, r)
+ if err != nil {
+ return "", err
+ }
+ expect, err := s3signature(secret, scope, signedHeaders, stringToSign)
if err != nil {
return "", err
} else if expect != signature {
- return "", errors.New("signature does not match")
+ return "", fmt.Errorf("signature does not match (scope %q signedHeaders %q stringToSign %q)", scope, signedHeaders, stringToSign)
}
return secret, nil
}
// s3ErrorResponse writes an S3-style XML error document to w along
// with the given HTTP status code. s3code is the S3 error code string
// (e.g., "NoSuchKey"), message is a human-readable explanation, and
// resource is the request path echoed back to the client.
func s3ErrorResponse(w http.ResponseWriter, s3code string, message string, resource string, code int) {
	w.Header().Set("Content-Type", "application/xml")
	w.Header().Set("X-Content-Type-Options", "nosniff")
	w.WriteHeader(code)
	// RequestId is left empty but the element is still emitted (no
	// omitempty tag), matching the shape of an S3 error body.
	errstruct := struct {
		Code      string
		Message   string
		Resource  string
		RequestId string
	}{
		Code:     s3code,
		Message:  message,
		Resource: resource,
	}
	fmt.Fprint(w, xml.Header)
	// The status header has already been written, so an encoding error
	// here cannot be reported to the client; ignore it.
	xml.NewEncoder(w).EncodeElement(errstruct, xml.StartElement{Name: xml.Name{Local: "Error"}})
}
+
// S3 API error code strings, returned in the <Code> element of an XML
// error response body.
var (
	NoSuchKey             = "NoSuchKey"
	NoSuchBucket          = "NoSuchBucket"
	InvalidArgument       = "InvalidArgument"
	InternalError         = "InternalError"
	UnauthorizedAccess    = "UnauthorizedAccess"
	InvalidRequest        = "InvalidRequest"
	SignatureDoesNotMatch = "SignatureDoesNotMatch"
)
+
// serveS3 handles r and returns true if r is a request from an S3
// client, otherwise it returns false.
func (h *handler) serveS3(w http.ResponseWriter, r *http.Request) bool {
if auth := r.Header.Get("Authorization"); strings.HasPrefix(auth, "AWS ") {
split := strings.SplitN(auth[4:], ":", 2)
if len(split) < 2 {
- http.Error(w, "malformed Authorization header", http.StatusUnauthorized)
+ s3ErrorResponse(w, InvalidRequest, "malformed Authorization header", r.URL.Path, http.StatusUnauthorized)
return true
}
token = split[0]
} else if strings.HasPrefix(auth, s3SignAlgorithm+" ") {
t, err := h.checks3signature(r)
if err != nil {
- http.Error(w, "signature verification failed: "+err.Error(), http.StatusForbidden)
+ s3ErrorResponse(w, SignatureDoesNotMatch, "signature verification failed: "+err.Error(), r.URL.Path, http.StatusForbidden)
return true
}
token = t
return false
}
- _, kc, client, release, err := h.getClients(r.Header.Get("X-Request-Id"), token)
- if err != nil {
- http.Error(w, "Pool failed: "+h.clientPool.Err().Error(), http.StatusInternalServerError)
- return true
+ var err error
+ var fs arvados.CustomFileSystem
+ if r.Method == http.MethodGet || r.Method == http.MethodHead {
+ // Use a single session (cached FileSystem) across
+ // multiple read requests.
+ fs, err = h.Config.Cache.GetSession(token)
+ if err != nil {
+ s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
+ return true
+ }
+ } else {
+ // Create a FileSystem for this request, to avoid
+ // exposing incomplete write operations to concurrent
+ // requests.
+ _, kc, client, release, err := h.getClients(r.Header.Get("X-Request-Id"), token)
+ if err != nil {
+ s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
+ return true
+ }
+ defer release()
+ fs = client.SiteFileSystem(kc)
+ fs.ForwardSlashNameSubstitution(h.Config.cluster.Collections.ForwardSlashNameSubstitution)
}
- defer release()
- fs := client.SiteFileSystem(kc)
- fs.ForwardSlashNameSubstitution(h.Config.cluster.Collections.ForwardSlashNameSubstitution)
-
- objectNameGiven := strings.Count(strings.TrimSuffix(r.URL.Path, "/"), "/") > 1
+ var objectNameGiven bool
+ var bucketName string
+ fspath := "/by_id"
+ if id := parseCollectionIDFromDNSName(r.Host); id != "" {
+ fspath += "/" + id
+ bucketName = id
+ objectNameGiven = strings.Count(strings.TrimSuffix(r.URL.Path, "/"), "/") > 0
+ } else {
+ bucketName = strings.SplitN(strings.TrimPrefix(r.URL.Path, "/"), "/", 2)[0]
+ objectNameGiven = strings.Count(strings.TrimSuffix(r.URL.Path, "/"), "/") > 1
+ }
+ fspath += reMultipleSlashChars.ReplaceAllString(r.URL.Path, "/")
switch {
case r.Method == http.MethodGet && !objectNameGiven:
fmt.Fprintln(w, `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`)
} else {
// ListObjects
- h.s3list(w, r, fs)
+ h.s3list(bucketName, w, r, fs)
}
return true
case r.Method == http.MethodGet || r.Method == http.MethodHead:
- fspath := "/by_id" + r.URL.Path
fi, err := fs.Stat(fspath)
if r.Method == "HEAD" && !objectNameGiven {
// HeadBucket
if err == nil && fi.IsDir() {
w.WriteHeader(http.StatusOK)
} else if os.IsNotExist(err) {
- w.WriteHeader(http.StatusNotFound)
+ s3ErrorResponse(w, NoSuchBucket, "The specified bucket does not exist.", r.URL.Path, http.StatusNotFound)
} else {
- http.Error(w, err.Error(), http.StatusBadGateway)
+ s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusBadGateway)
}
return true
}
if os.IsNotExist(err) ||
(err != nil && err.Error() == "not a directory") ||
(fi != nil && fi.IsDir()) {
- http.Error(w, "not found", http.StatusNotFound)
+ s3ErrorResponse(w, NoSuchKey, "The specified key does not exist.", r.URL.Path, http.StatusNotFound)
return true
}
// shallow copy r, and change URL path
return true
case r.Method == http.MethodPut:
if !objectNameGiven {
- http.Error(w, "missing object name in PUT request", http.StatusBadRequest)
+ s3ErrorResponse(w, InvalidArgument, "Missing object name in PUT request.", r.URL.Path, http.StatusBadRequest)
return true
}
- fspath := "by_id" + r.URL.Path
var objectIsDir bool
if strings.HasSuffix(fspath, "/") {
if !h.Config.cluster.Collections.S3FolderObjects {
- http.Error(w, "invalid object name: trailing slash", http.StatusBadRequest)
+ s3ErrorResponse(w, InvalidArgument, "invalid object name: trailing slash", r.URL.Path, http.StatusBadRequest)
return true
}
n, err := r.Body.Read(make([]byte, 1))
if err != nil && err != io.EOF {
- http.Error(w, fmt.Sprintf("error reading request body: %s", err), http.StatusInternalServerError)
+ s3ErrorResponse(w, InternalError, fmt.Sprintf("error reading request body: %s", err), r.URL.Path, http.StatusInternalServerError)
return true
} else if n > 0 {
- http.Error(w, "cannot create object with trailing '/' char unless content is empty", http.StatusBadRequest)
+ s3ErrorResponse(w, InvalidArgument, "cannot create object with trailing '/' char unless content is empty", r.URL.Path, http.StatusBadRequest)
return true
} else if strings.SplitN(r.Header.Get("Content-Type"), ";", 2)[0] != "application/x-directory" {
- http.Error(w, "cannot create object with trailing '/' char unless Content-Type is 'application/x-directory'", http.StatusBadRequest)
+ s3ErrorResponse(w, InvalidArgument, "cannot create object with trailing '/' char unless Content-Type is 'application/x-directory'", r.URL.Path, http.StatusBadRequest)
return true
}
// Given PUT "foo/bar/", we'll use "foo/bar/."
fi, err := fs.Stat(fspath)
if err != nil && err.Error() == "not a directory" {
// requested foo/bar, but foo is a file
- http.Error(w, "object name conflicts with existing object", http.StatusBadRequest)
+ s3ErrorResponse(w, InvalidArgument, "object name conflicts with existing object", r.URL.Path, http.StatusBadRequest)
return true
}
if strings.HasSuffix(r.URL.Path, "/") && err == nil && !fi.IsDir() {
// requested foo/bar/, but foo/bar is a file
- http.Error(w, "object name conflicts with existing object", http.StatusBadRequest)
+ s3ErrorResponse(w, InvalidArgument, "object name conflicts with existing object", r.URL.Path, http.StatusBadRequest)
return true
}
// create missing parent/intermediate directories, if any
dir := fspath[:i]
if strings.HasSuffix(dir, "/") {
err = errors.New("invalid object name (consecutive '/' chars)")
- http.Error(w, err.Error(), http.StatusBadRequest)
+ s3ErrorResponse(w, InvalidArgument, err.Error(), r.URL.Path, http.StatusBadRequest)
return true
}
err = fs.Mkdir(dir, 0755)
// Cannot create a directory
// here.
err = fmt.Errorf("mkdir %q failed: %w", dir, err)
- http.Error(w, err.Error(), http.StatusBadRequest)
+ s3ErrorResponse(w, InvalidArgument, err.Error(), r.URL.Path, http.StatusBadRequest)
return true
} else if err != nil && !os.IsExist(err) {
err = fmt.Errorf("mkdir %q failed: %w", dir, err)
- http.Error(w, err.Error(), http.StatusInternalServerError)
+ s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
return true
}
}
}
if err != nil {
err = fmt.Errorf("open %q failed: %w", r.URL.Path, err)
- http.Error(w, err.Error(), http.StatusBadRequest)
+ s3ErrorResponse(w, InvalidArgument, err.Error(), r.URL.Path, http.StatusBadRequest)
return true
}
defer f.Close()
_, err = io.Copy(f, r.Body)
if err != nil {
err = fmt.Errorf("write to %q failed: %w", r.URL.Path, err)
- http.Error(w, err.Error(), http.StatusBadGateway)
+ s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusBadGateway)
return true
}
err = f.Close()
if err != nil {
err = fmt.Errorf("write to %q failed: close: %w", r.URL.Path, err)
- http.Error(w, err.Error(), http.StatusBadGateway)
+ s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusBadGateway)
return true
}
}
err = fs.Sync()
if err != nil {
err = fmt.Errorf("sync failed: %w", err)
- http.Error(w, err.Error(), http.StatusInternalServerError)
+ s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
return true
}
+ // Ensure a subsequent read operation will see the changes.
+ h.Config.Cache.ResetSession(token)
w.WriteHeader(http.StatusOK)
return true
case r.Method == http.MethodDelete:
if !objectNameGiven || r.URL.Path == "/" {
- http.Error(w, "missing object name in DELETE request", http.StatusBadRequest)
+ s3ErrorResponse(w, InvalidArgument, "missing object name in DELETE request", r.URL.Path, http.StatusBadRequest)
return true
}
- fspath := "by_id" + r.URL.Path
if strings.HasSuffix(fspath, "/") {
fspath = strings.TrimSuffix(fspath, "/")
fi, err := fs.Stat(fspath)
w.WriteHeader(http.StatusNoContent)
return true
} else if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
+ s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
return true
} else if !fi.IsDir() {
// if "foo" exists and is a file, then
}
if err != nil {
err = fmt.Errorf("rm failed: %w", err)
- http.Error(w, err.Error(), http.StatusBadRequest)
+ s3ErrorResponse(w, InvalidArgument, err.Error(), r.URL.Path, http.StatusBadRequest)
return true
}
err = fs.Sync()
if err != nil {
err = fmt.Errorf("sync failed: %w", err)
- http.Error(w, err.Error(), http.StatusInternalServerError)
+ s3ErrorResponse(w, InternalError, err.Error(), r.URL.Path, http.StatusInternalServerError)
return true
}
+ // Ensure a subsequent read operation will see the changes.
+ h.Config.Cache.ResetSession(token)
w.WriteHeader(http.StatusNoContent)
return true
default:
- http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+ s3ErrorResponse(w, InvalidRequest, "method not allowed", r.URL.Path, http.StatusMethodNotAllowed)
return true
}
}
// errDone is a sentinel error used to terminate an iteration early
// while indicating success rather than failure.
// NOTE(review): presumably returned from a walk callback inside s3list
// to stop listing once enough keys are collected — confirm against the
// full file.
var errDone = errors.New("done")
-func (h *handler) s3list(w http.ResponseWriter, r *http.Request, fs arvados.CustomFileSystem) {
+func (h *handler) s3list(bucket string, w http.ResponseWriter, r *http.Request, fs arvados.CustomFileSystem) {
var params struct {
- bucket string
delimiter string
marker string
maxKeys int
prefix string
}
- params.bucket = strings.SplitN(r.URL.Path[1:], "/", 2)[0]
params.delimiter = r.FormValue("delimiter")
params.marker = r.FormValue("marker")
if mk, _ := strconv.ParseInt(r.FormValue("max-keys"), 10, 64); mk > 0 && mk < s3MaxKeys {
}
params.prefix = r.FormValue("prefix")
- bucketdir := "by_id/" + params.bucket
+ bucketdir := "by_id/" + bucket
// walkpath is the directory (relative to bucketdir) we need
// to walk: the innermost directory that is guaranteed to
// contain all paths that have the requested prefix. Examples:
// github.com/aws/aws-sdk-net never terminates its
// paging loop).
NextMarker string `xml:"NextMarker,omitempty"`
+ // ListObjectsV2 has a KeyCount response field.
+ KeyCount int
}
resp := listResp{
ListResp: s3.ListResp{
- Name: strings.SplitN(r.URL.Path[1:], "/", 2)[0],
+ Name: bucket,
Prefix: params.prefix,
Delimiter: params.delimiter,
Marker: params.marker,
}
sort.Slice(resp.CommonPrefixes, func(i, j int) bool { return resp.CommonPrefixes[i].Prefix < resp.CommonPrefixes[j].Prefix })
}
+ resp.KeyCount = len(resp.Contents)
w.Header().Set("Content-Type", "application/xml")
io.WriteString(w, xml.Header)
if err := xml.NewEncoder(w).Encode(resp); err != nil {