// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

// Package keepstore implements the keepstore service component and
// back-end storage drivers.
//
// It is an internal module, only intended to be imported by
// /cmd/arvados-server and other server-side components in this
// repository.
package keepstore

import (
	"bytes"
	"context"
	"crypto/md5"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/arvadosclient"
	"git.arvados.org/arvados.git/sdk/go/auth"
	"git.arvados.org/arvados.git/sdk/go/ctxlog"
	"git.arvados.org/arvados.git/sdk/go/httpserver"
	"git.arvados.org/arvados.git/sdk/go/keepclient"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/sirupsen/logrus"
)

// Maximum size of a keep block is 64 MiB.
const BlockSize = 1 << 26

var (
	errChecksum          = httpserver.ErrorWithStatus(errors.New("checksum mismatch in stored data"), http.StatusBadGateway)
	errNoTokenProvided   = httpserver.ErrorWithStatus(errors.New("no token provided in Authorization header"), http.StatusUnauthorized)
	errMethodNotAllowed  = httpserver.ErrorWithStatus(errors.New("method not allowed"), http.StatusMethodNotAllowed)
	errVolumeUnavailable = httpserver.ErrorWithStatus(errors.New("volume unavailable"), http.StatusServiceUnavailable)
	errCollision         = httpserver.ErrorWithStatus(errors.New("hash collision"), http.StatusInternalServerError)
	errExpiredSignature  = httpserver.ErrorWithStatus(errors.New("expired signature"), http.StatusUnauthorized)
	errInvalidSignature  = httpserver.ErrorWithStatus(errors.New("invalid signature"), http.StatusBadRequest)
	errInvalidLocator    = httpserver.ErrorWithStatus(errors.New("invalid locator"), http.StatusBadRequest)
	errFull              = httpserver.ErrorWithStatus(errors.New("insufficient storage"), http.StatusInsufficientStorage)
	errTooLarge          = httpserver.ErrorWithStatus(errors.New("request entity too large"), http.StatusRequestEntityTooLarge)

	// driver maps a volume driver name (as given in cluster
	// config) to the corresponding volumeDriver factory function.
	driver = make(map[string]volumeDriver)
)

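// Back-end storage drivers are implemented in other files in this
// package and register themselves in the driver map above. A minimal
// sketch of such a registration -- the "Example" driver name and the
// newExampleVolume constructor are hypothetical, shown only to
// illustrate the mechanism:
//
//	func init() {
//		driver["Example"] = func(params newVolumeParams) (volume, error) {
//			return newExampleVolume(params)
//		}
//	}
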
// indexOptions controls the output of (*keepstore)Index.
type indexOptions struct {
	MountUUID string    // index this mount only; if empty, index all readable mounts
	Prefix    string    // only list blocks whose hashes start with this prefix
	WriteTo   io.Writer // destination for the index listing
}

// keepstore is the keepstore service component: it accepts block
// read/write/trash/index requests and dispatches them to the
// configured back-end volumes.
type keepstore struct {
	cluster    *arvados.Cluster
	logger     logrus.FieldLogger
	serviceURL arvados.URL
	mounts     map[string]*mount
	mountsR    []*mount // readable mounts, sorted by UUID
	mountsW    []*mount // writable mounts, sorted by UUID
	bufferPool *bufferPool

	iostats map[volume]*ioStats

	remoteClients    map[string]*keepclient.KeepClient
	remoteClientsMtx sync.Mutex
}

// newKeepstore checks the cluster configuration, initializes the
// configured volumes, and returns a ready-to-use keepstore.
func newKeepstore(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry, serviceURL arvados.URL) (*keepstore, error) {
	logger := ctxlog.FromContext(ctx)

	if cluster.API.MaxConcurrentRequests > 0 && cluster.API.MaxConcurrentRequests < cluster.API.MaxKeepBlobBuffers {
		logger.Warnf("Possible configuration mistake: not useful to set API.MaxKeepBlobBuffers (%d) higher than API.MaxConcurrentRequests (%d)", cluster.API.MaxKeepBlobBuffers, cluster.API.MaxConcurrentRequests)
	}

	if cluster.Collections.BlobSigningKey != "" {
		// signing key is configured; nothing to check
	} else if cluster.Collections.BlobSigning {
		return nil, errors.New("cannot enable Collections.BlobSigning with no Collections.BlobSigningKey")
	} else {
		logger.Warn("Running without a blob signing key. Block locators returned by this server will not be signed, and will be rejected by a server that enforces permissions. To fix this, configure Collections.BlobSigning and Collections.BlobSigningKey.")
	}

	if cluster.API.MaxKeepBlobBuffers <= 0 {
		return nil, fmt.Errorf("API.MaxKeepBlobBuffers must be greater than zero")
	}
	bufferPool := newBufferPool(logger, cluster.API.MaxKeepBlobBuffers, reg)

	ks := &keepstore{
		cluster:       cluster,
		logger:        logger,
		serviceURL:    serviceURL,
		bufferPool:    bufferPool,
		remoteClients: make(map[string]*keepclient.KeepClient),
	}

	err := ks.setupMounts(newVolumeMetricsVecs(reg))
	if err != nil {
		return nil, err
	}
	return ks, nil
}

// setupMounts initializes ks.mounts, ks.mountsR, and ks.mountsW from
// the cluster's volume configuration.
func (ks *keepstore) setupMounts(metrics *volumeMetricsVecs) error {
	ks.mounts = make(map[string]*mount)
	if len(ks.cluster.Volumes) == 0 {
		return errors.New("no volumes configured")
	}
	for uuid, cfgvol := range ks.cluster.Volumes {
		va, ok := cfgvol.AccessViaHosts[ks.serviceURL]
		if !ok && len(cfgvol.AccessViaHosts) > 0 {
			continue
		}
		dri, ok := driver[cfgvol.Driver]
		if !ok {
			return fmt.Errorf("volume %s: invalid driver %q", uuid, cfgvol.Driver)
		}
		vol, err := dri(newVolumeParams{
			UUID:         uuid,
			Cluster:      ks.cluster,
			ConfigVolume: cfgvol,
			Logger:       ks.logger,
			MetricsVecs:  metrics,
			BufferPool:   ks.bufferPool,
		})
		if err != nil {
			return fmt.Errorf("error initializing volume %s: %s", uuid, err)
		}
		sc := cfgvol.StorageClasses
		if len(sc) == 0 {
			sc = map[string]bool{"default": true}
		}
		repl := cfgvol.Replication
		if repl < 1 {
			repl = 1
		}
		pri := 0
		for class, in := range cfgvol.StorageClasses {
			p := ks.cluster.StorageClasses[class].Priority
			if in && p > pri {
				pri = p
			}
		}
		mnt := &mount{
			volume:   vol,
			priority: pri,
			KeepMount: arvados.KeepMount{
				UUID:           uuid,
				DeviceID:       vol.DeviceID(),
				AllowWrite:     !va.ReadOnly && !cfgvol.ReadOnly,
				AllowTrash:     !va.ReadOnly && (!cfgvol.ReadOnly || cfgvol.AllowTrashWhenReadOnly),
				Replication:    repl,
				StorageClasses: sc,
			},
		}
		ks.mounts[uuid] = mnt
		ks.logger.Printf("started volume %s (%s), AllowWrite=%v, AllowTrash=%v", uuid, vol.DeviceID(), mnt.AllowWrite, mnt.AllowTrash)
	}
	if len(ks.mounts) == 0 {
		return fmt.Errorf("no volumes configured for %s", ks.serviceURL)
	}

	for _, mnt := range ks.mounts {
		ks.mountsR = append(ks.mountsR, mnt)
		if mnt.AllowWrite {
			ks.mountsW = append(ks.mountsW, mnt)
		}
	}
	// Sorting mounts by UUID makes behavior more predictable, and
	// is convenient for testing -- for example, "index all
	// volumes" and "trash block on all volumes" will visit
	// volumes in predictable order.
	sort.Slice(ks.mountsR, func(i, j int) bool { return ks.mountsR[i].UUID < ks.mountsR[j].UUID })
	sort.Slice(ks.mountsW, func(i, j int) bool { return ks.mountsW[i].UUID < ks.mountsW[j].UUID })
	return nil
}

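// setupMounts reads volume definitions from the cluster config. An
// illustrative (not authoritative) config stanza exercising the fields
// referenced above -- UUID, Driver, Replication, StorageClasses,
// AccessViaHosts, ReadOnly -- might look like:
//
//	Volumes:
//	  zzzzz-nyw5e-000000000000000:
//	    Driver: Directory
//	    Replication: 1
//	    StorageClasses:
//	      default: true
//	    AccessViaHosts:
//	      "http://keep0.zzzzz.example.com:25107":
//	        ReadOnly: false
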
// checkLocatorSignature checks that locator has a valid signature.
// If the BlobSigning config is false, it returns nil even if the
// signature is invalid or missing.
func (ks *keepstore) checkLocatorSignature(ctx context.Context, locator string) error {
	if !ks.cluster.Collections.BlobSigning {
		return nil
	}
	token := ctxToken(ctx)
	if token == "" {
		return errNoTokenProvided
	}
	err := arvados.VerifySignature(locator, token, ks.cluster.Collections.BlobSigningTTL.Duration(), []byte(ks.cluster.Collections.BlobSigningKey))
	if err == arvados.ErrSignatureExpired {
		return errExpiredSignature
	} else if err != nil {
		return errInvalidSignature
	}
	return nil
}

// signLocator signs the locator for the given token, if possible.
// Note this signs if the BlobSigningKey config is available, even if
// the BlobSigning config is false.
func (ks *keepstore) signLocator(token, locator string) string {
	if token == "" || len(ks.cluster.Collections.BlobSigningKey) == 0 {
		return locator
	}
	ttl := ks.cluster.Collections.BlobSigningTTL.Duration()
	return arvados.SignLocator(locator, token, time.Now().Add(ttl), ttl, []byte(ks.cluster.Collections.BlobSigningKey))
}

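// Illustrative use (the token and signature values are hypothetical):
//
//	signed := ks.signLocator(token, "acbd18db4cc2f85cedef654fccc4a4d8+3")
//	// signed now looks like "acbd18db4cc2f85cedef654fccc4a4d8+3+A<hexsig>@<hexstamp>"
//	err := ks.checkLocatorSignature(ctx, signed)
//	// err is nil until the signature's TTL expires (assuming
//	// BlobSigning is enabled and ctx carries the same token).
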
// BlockRead reads the block identified by opts.Locator and writes its
// data to opts.WriteTo, returning the number of bytes written.
func (ks *keepstore) BlockRead(ctx context.Context, opts arvados.BlockReadOptions) (n int, err error) {
	li, err := getLocatorInfo(opts.Locator)
	if err != nil {
		return 0, err
	}
	out := opts.WriteTo
	if rw, ok := out.(http.ResponseWriter); ok && li.size > 0 {
		out = &setSizeOnWrite{ResponseWriter: rw, size: li.size}
	}
	if li.remote && !li.signed {
		return ks.blockReadRemote(ctx, opts)
	}
	if err := ks.checkLocatorSignature(ctx, opts.Locator); err != nil {
		return 0, err
	}
	hashcheck := md5.New()
	if li.size > 0 {
		out = newHashCheckWriter(out, hashcheck, int64(li.size), li.hash)
	} else {
		out = io.MultiWriter(out, hashcheck)
	}

	buf, err := ks.bufferPool.GetContext(ctx)
	if err != nil {
		return 0, err
	}
	defer ks.bufferPool.Put(buf)
	streamer := newStreamWriterAt(out, 65536, buf)
	defer streamer.Close()

	var errToCaller error = os.ErrNotExist
	for _, mnt := range ks.rendezvous(li.hash, ks.mountsR) {
		if ctx.Err() != nil {
			return 0, ctx.Err()
		}
		err := mnt.BlockRead(ctx, li.hash, streamer)
		if err != nil {
			if streamer.WroteAt() != 0 {
				// BlockRead encountered an error
				// after writing some data, so it's
				// too late to try another
				// volume. Flush streamer before
				// calling Wrote() to ensure our
				// return value accurately reflects
				// the number of bytes written to
				// opts.WriteTo.
				streamer.Close()
				return streamer.Wrote(), err
			}
			if !os.IsNotExist(err) {
				errToCaller = err
			}
			continue
		}
		if li.size == 0 {
			// hashCheckingWriter isn't in use because we
			// don't know the expected size. All we can do
			// is check after writing all the data, and
			// trust the caller is doing a HEAD request so
			// it's not too late to set an error code in
			// the response header.
			err = streamer.Close()
			if hash := fmt.Sprintf("%x", hashcheck.Sum(nil)); hash != li.hash && err == nil {
				err = errChecksum
			}
			if rw, ok := opts.WriteTo.(http.ResponseWriter); ok {
				// We didn't set the content-length header
				// above because we didn't know the block size
				// until now.
				rw.Header().Set("Content-Length", fmt.Sprintf("%d", streamer.WroteAt()))
			}
			return streamer.WroteAt(), err
		} else if streamer.WroteAt() != li.size {
			// If the backend read fewer bytes than
			// expected but returns no error, we can
			// classify this as a checksum error (even
			// though hashCheckWriter doesn't know that
			// yet, it's just waiting for the next
			// write). If our caller is serving a GET
			// request it's too late to do anything about
			// it anyway, but if it's a HEAD request the
			// caller can still change the response status
			// code.
			return streamer.WroteAt(), errChecksum
		}
		// Ensure streamer flushes all buffered data without
		// errors before returning success.
		err = streamer.Close()
		return streamer.Wrote(), err
	}
	return 0, errToCaller
}

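// Illustrative caller (signedLocator is a hypothetical signed locator
// for a block stored on this server): read the block into memory and
// check how many bytes arrived.
//
//	var buf bytes.Buffer
//	n, err := ks.BlockRead(ctx, arvados.BlockReadOptions{
//		Locator: signedLocator,
//		WriteTo: &buf,
//	})
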
// blockReadRemote reads a block from the remote cluster indicated by
// the locator's +R hint. If opts.LocalLocator is non-nil, the block is
// also written to local storage and the resulting locally signed
// locator is reported via the opts.LocalLocator callback before any
// data is written to opts.WriteTo.
func (ks *keepstore) blockReadRemote(ctx context.Context, opts arvados.BlockReadOptions) (int, error) {
	token := ctxToken(ctx)
	if token == "" {
		return 0, errNoTokenProvided
	}
	var remoteClient *keepclient.KeepClient
	var parts []string
	li, err := getLocatorInfo(opts.Locator)
	if err != nil {
		return 0, err
	}
	for i, part := range strings.Split(opts.Locator, "+") {
		switch {
		case i == 0:
			// don't try to parse hash part as hint
		case strings.HasPrefix(part, "A"):
			// drop local permission hint
			continue
		case len(part) > 7 && part[0] == 'R' && part[6] == '-':
			remoteID := part[1:6]
			remote, ok := ks.cluster.RemoteClusters[remoteID]
			if !ok {
				return 0, httpserver.ErrorWithStatus(errors.New("remote cluster not configured"), http.StatusBadRequest)
			}
			kc, err := ks.remoteClient(remoteID, remote, token)
			if err == auth.ErrObsoleteToken {
				return 0, httpserver.ErrorWithStatus(err, http.StatusBadRequest)
			} else if err != nil {
				return 0, err
			}
			remoteClient = kc
			part = "A" + part[7:]
		}
		parts = append(parts, part)
	}
	if remoteClient == nil {
		return 0, httpserver.ErrorWithStatus(errors.New("invalid remote hint"), http.StatusBadRequest)
	}
	locator := strings.Join(parts, "+")
	if opts.LocalLocator == nil {
		// Read from remote cluster and stream response back
		// to caller.
		if rw, ok := opts.WriteTo.(http.ResponseWriter); ok && li.size > 0 {
			rw.Header().Set("Content-Length", fmt.Sprintf("%d", li.size))
		}
		return remoteClient.BlockRead(ctx, arvados.BlockReadOptions{
			Locator: locator,
			WriteTo: opts.WriteTo,
		})
	}
	// We must call LocalLocator before writing any data to
	// opts.WriteTo, otherwise the caller can't put the local
	// locator in a response header. So we copy into memory,
	// generate the local signature, then copy from memory to
	// opts.WriteTo.
	buf, err := ks.bufferPool.GetContext(ctx)
	if err != nil {
		return 0, err
	}
	defer ks.bufferPool.Put(buf)
	writebuf := bytes.NewBuffer(buf[:0])
	ks.logger.Infof("blockReadRemote(%s): remote read(%s)", opts.Locator, locator)
	_, err = remoteClient.BlockRead(ctx, arvados.BlockReadOptions{
		Locator: locator,
		WriteTo: writebuf,
	})
	if err != nil {
		return 0, err
	}
	resp, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
		Data: writebuf.Bytes(),
	})
	if err != nil {
		return 0, err
	}
	opts.LocalLocator(resp.Locator)
	if rw, ok := opts.WriteTo.(http.ResponseWriter); ok {
		rw.Header().Set("Content-Length", fmt.Sprintf("%d", writebuf.Len()))
	}
	n, err := io.Copy(opts.WriteTo, bytes.NewReader(writebuf.Bytes()))
	return int(n), err
}

// remoteClient returns a keepclient for the given remote cluster, using
// a copy of the cached client with the caller's token salted for that
// cluster.
func (ks *keepstore) remoteClient(remoteID string, remoteCluster arvados.RemoteCluster, token string) (*keepclient.KeepClient, error) {
	ks.remoteClientsMtx.Lock()
	kc, ok := ks.remoteClients[remoteID]
	ks.remoteClientsMtx.Unlock()
	if !ok {
		c := &arvados.Client{
			APIHost:  remoteCluster.Host,
			Insecure: remoteCluster.Insecure,
		}
		ac, err := arvadosclient.New(c)
		if err != nil {
			return nil, err
		}
		kc, err = keepclient.MakeKeepClient(ac)
		if err != nil {
			return nil, err
		}
		kc.DiskCacheSize = keepclient.DiskCacheDisabled

		ks.remoteClientsMtx.Lock()
		ks.remoteClients[remoteID] = kc
		ks.remoteClientsMtx.Unlock()
	}
	accopy := *kc.Arvados
	accopy.ApiToken = token
	kccopy := *kc
	kccopy.Arvados = &accopy
	token, err := auth.SaltToken(token, remoteID)
	if err != nil {
		return nil, err
	}
	kccopy.Arvados.ApiToken = token
	return &kccopy, nil
}

// BlockWrite writes a block to one or more volumes.
func (ks *keepstore) BlockWrite(ctx context.Context, opts arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {
	var resp arvados.BlockWriteResponse
	var hash string
	if opts.Data == nil {
		buf, err := ks.bufferPool.GetContext(ctx)
		if err != nil {
			return resp, err
		}
		defer ks.bufferPool.Put(buf)
		w := bytes.NewBuffer(buf[:0])
		h := md5.New()
		limitedReader := &io.LimitedReader{R: opts.Reader, N: BlockSize}
		n, err := io.Copy(io.MultiWriter(w, h), limitedReader)
		if err != nil {
			return resp, err
		}
		if limitedReader.N == 0 {
			// Data size is either exactly BlockSize, or too big.
			n, err := opts.Reader.Read(make([]byte, 1))
			if n > 0 || err == nil {
				err = errTooLarge
			}
			if err != io.EOF {
				return resp, httpserver.ErrorWithStatus(err, http.StatusRequestEntityTooLarge)
			}
		}
		opts.Data = buf[:n]
		if opts.DataSize != 0 && int(n) != opts.DataSize {
			return resp, httpserver.ErrorWithStatus(fmt.Errorf("content length %d did not match specified data size %d", n, opts.DataSize), http.StatusBadRequest)
		}
		hash = fmt.Sprintf("%x", h.Sum(nil))
	} else {
		hash = fmt.Sprintf("%x", md5.Sum(opts.Data))
	}
	if opts.Hash != "" && !strings.HasPrefix(opts.Hash, hash) {
		return resp, httpserver.ErrorWithStatus(fmt.Errorf("content hash %s did not match specified locator %s", hash, opts.Hash), http.StatusBadRequest)
	}
	rvzmounts := ks.rendezvous(hash, ks.mountsW)
	result := newPutProgress(opts.StorageClasses)
	for _, mnt := range rvzmounts {
		if !result.Want(mnt) {
			continue
		}
		cmp := &checkEqual{Expect: opts.Data}
		if err := mnt.BlockRead(ctx, hash, cmp); err == nil {
			if !cmp.Equal() {
				return resp, errCollision
			}
			err := mnt.BlockTouch(hash)
			if err == nil {
				result.Add(mnt)
			}
		}
	}
	var allFull atomic.Bool
	allFull.Store(true)
	// pending tracks what result will be if all outstanding
	// writes succeed.
	pending := result.Copy()
	cond := sync.NewCond(new(sync.Mutex))
	cond.L.Lock()
	var wg sync.WaitGroup
nextmnt:
	for _, mnt := range rvzmounts {
		for {
			if result.Done() || ctx.Err() != nil {
				break nextmnt
			}
			if !result.Want(mnt) {
				continue nextmnt
			}
			if pending.Want(mnt) {
				break
			}
			// This mount might not be needed, depending
			// on the outcome of pending writes. Wait for
			// a pending write to finish, then check
			// again.
			cond.Wait()
		}
		mnt := mnt
		logger := ks.logger.WithField("mount", mnt.UUID)
		pending.Add(mnt)
		wg.Add(1)
		go func() {
			defer wg.Done()
			logger.Debug("start write")
			err := mnt.BlockWrite(ctx, hash, opts.Data)
			cond.L.Lock()
			defer cond.L.Unlock()
			defer cond.Broadcast()
			if err != nil {
				logger.Debug("write failed")
				pending.Sub(mnt)
				if err != errFull {
					allFull.Store(false)
				}
			} else {
				result.Add(mnt)
				pending.Sub(mnt)
			}
		}()
	}
	cond.L.Unlock()
	wg.Wait()
	if ctx.Err() != nil {
		return resp, ctx.Err()
	}
	if result.Done() || result.totalReplication > 0 {
		resp = arvados.BlockWriteResponse{
			Locator:        ks.signLocator(ctxToken(ctx), fmt.Sprintf("%s+%d", hash, len(opts.Data))),
			Replicas:       result.totalReplication,
			StorageClasses: result.classDone,
		}
		return resp, nil
	}
	if allFull.Load() {
		return resp, errFull
	}
	return resp, errVolumeUnavailable
}

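// Illustrative caller (the data and storage class are hypothetical):
// store a small block and use the signed locator from the response.
//
//	resp, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
//		Data:           []byte("foo"),
//		StorageClasses: []string{"default"},
//	})
//	// On success, resp.Locator is a signed locator such as
//	// "acbd18db4cc2f85cedef654fccc4a4d8+3+A<hexsig>@<hexstamp>",
//	// resp.Replicas is the total replication achieved, and
//	// resp.StorageClasses reports the per-class results.
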
// rendezvous sorts the given mounts by descending priority, then by
// rendezvous order for the given locator.
func (*keepstore) rendezvous(locator string, mnts []*mount) []*mount {
	hash := locator
	if len(hash) > 32 {
		hash = hash[:32]
	}
	// copy the provided []*mount before doing an in-place sort
	mnts = append([]*mount(nil), mnts...)
	weight := make(map[*mount]string)
	for _, mnt := range mnts {
		uuidpart := mnt.UUID
		if len(uuidpart) == 27 {
			// strip zzzzz-yyyyy- prefixes
			uuidpart = uuidpart[12:]
		}
		weight[mnt] = fmt.Sprintf("%x", md5.Sum([]byte(hash+uuidpart)))
	}
	sort.Slice(mnts, func(i, j int) bool {
		if p := mnts[i].priority - mnts[j].priority; p != 0 {
			return p > 0
		}
		return weight[mnts[i]] < weight[mnts[j]]
	})
	return mnts
}

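// The weight map gives each mount a stable, pseudo-random rank for a
// given block, so every keepstore process probes the same volumes in
// the same order for that block. Sketch of the weighting for one mount
// (the hash and UUID values are hypothetical):
//
//	// mount UUID "zzzzz-nyw5e-000000000000000" -> uuidpart "000000000000000"
//	weight[mnt] = fmt.Sprintf("%x", md5.Sum([]byte("acbd18db4cc2f85cedef654fccc4a4d8"+"000000000000000")))
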
// checkEqual reports whether the data written to it (via io.WriterAt
// interface) is equal to the expected data.
//
// Expect should not be changed after the first Write.
//
// Results are undefined if WriteAt is called with overlapping ranges.
type checkEqual struct {
	Expect   []byte
	equal    atomic.Int64
	notequal atomic.Bool
}

func (ce *checkEqual) Equal() bool {
	return !ce.notequal.Load() && ce.equal.Load() == int64(len(ce.Expect))
}

func (ce *checkEqual) WriteAt(p []byte, offset int64) (int, error) {
	endpos := int(offset) + len(p)
	if offset >= 0 && endpos <= len(ce.Expect) && bytes.Equal(p, ce.Expect[int(offset):endpos]) {
		ce.equal.Add(int64(len(p)))
	} else {
		ce.notequal.Store(true)
	}
	return len(p), nil
}

// BlockUntrash attempts to restore a trashed block on all writable
// volumes.
func (ks *keepstore) BlockUntrash(ctx context.Context, locator string) error {
	li, err := getLocatorInfo(locator)
	if err != nil {
		return err
	}
	var errToCaller error = os.ErrNotExist
	for _, mnt := range ks.mountsW {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		err := mnt.BlockUntrash(li.hash)
		if err == nil {
			errToCaller = nil
		} else if !os.IsNotExist(err) && errToCaller != nil {
			errToCaller = err
		}
	}
	return errToCaller
}

// BlockTouch updates the timestamp of the given block, returning nil
// after the first writable volume reports success.
func (ks *keepstore) BlockTouch(ctx context.Context, locator string) error {
	li, err := getLocatorInfo(locator)
	if err != nil {
		return err
	}
	var errToCaller error = os.ErrNotExist
	for _, mnt := range ks.mountsW {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		err := mnt.BlockTouch(li.hash)
		if err == nil {
			return nil
		}
		if !os.IsNotExist(err) {
			errToCaller = err
		}
	}
	return errToCaller
}

// BlockTrash moves the given block to trash on all mounts that allow
// trashing, provided the block is older than BlobSigningTTL.
func (ks *keepstore) BlockTrash(ctx context.Context, locator string) error {
	if !ks.cluster.Collections.BlobTrash {
		return errMethodNotAllowed
	}
	li, err := getLocatorInfo(locator)
	if err != nil {
		return err
	}
	var errToCaller error = os.ErrNotExist
	for _, mnt := range ks.mounts {
		if !mnt.AllowTrash {
			continue
		}
		if ctx.Err() != nil {
			return ctx.Err()
		}
		t, err := mnt.Mtime(li.hash)
		if err == nil && time.Now().Sub(t) > ks.cluster.Collections.BlobSigningTTL.Duration() {
			err = mnt.BlockTrash(li.hash)
		}
		if os.IsNotExist(errToCaller) || (errToCaller == nil && !os.IsNotExist(err)) {
			errToCaller = err
		}
	}
	return errToCaller
}

// Mounts returns the readable mounts, sorted by UUID.
func (ks *keepstore) Mounts() []*mount {
	return ks.mountsR
}

// Index writes an index of blocks on one volume (if opts.MountUUID is
// given) or all readable volumes to opts.WriteTo.
func (ks *keepstore) Index(ctx context.Context, opts indexOptions) error {
	mounts := ks.mountsR
	if opts.MountUUID != "" {
		mnt, ok := ks.mounts[opts.MountUUID]
		if !ok {
			return os.ErrNotExist
		}
		mounts = []*mount{mnt}
	}
	for _, mnt := range mounts {
		err := mnt.Index(ctx, opts.Prefix, opts.WriteTo)
		if err != nil {
			return err
		}
	}
	return nil
}

// ctxToken returns the first token in the auth credentials attached to
// ctx, or "" if there is none.
func ctxToken(ctx context.Context) string {
	if c, ok := auth.FromContext(ctx); ok && len(c.Tokens) > 0 {
		return c.Tokens[0]
	}
	return ""
}

// locatorInfo expresses the attributes of a locator that are relevant
// for keepstore decision-making.
type locatorInfo struct {
	hash   string
	size   int
	remote bool // locator has a +R hint
	signed bool // locator has a +A hint
}

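// For example (the signature, timestamp, and cluster ID below are
// hypothetical), the locator
//
//	acbd18db4cc2f85cedef654fccc4a4d8+3+A<hexsig>@<hexstamp>+Rzzzzz-<hexsig>
//
// parses to hash "acbd18db4cc2f85cedef654fccc4a4d8", size 3,
// signed true (+A hint), and remote true (+R hint).
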
// getLocatorInfo parses a locator string into a locatorInfo, returning
// errInvalidLocator if it is not well formed.
func getLocatorInfo(loc string) (locatorInfo, error) {
	var li locatorInfo
	plus := 0    // number of '+' chars seen so far
	partlen := 0 // chars since last '+'
	for i, c := range loc + "+" {
		if c == '+' {
			if partlen == 0 {
				// double/leading/trailing '+'
				return li, errInvalidLocator
			}
			if plus == 0 {
				if i != 32 {
					return li, errInvalidLocator
				}
				li.hash = loc[:i]
			}
			if plus == 1 {
				if size, err := strconv.Atoi(loc[i-partlen : i]); err == nil {
					li.size = size
				}
			}
			plus++
			partlen = 0
			continue
		}
		partlen++
		if partlen == 1 {
			if c == 'A' {
				li.signed = true
			}
			if c == 'R' {
				li.remote = true
			}
			if plus > 1 && c >= '0' && c <= '9' {
				// size, if present at all, must come first
				return li, errInvalidLocator
			}
		}
		if plus == 0 && !((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f')) {
			// non-hexadecimal char in hash part
			return li, errInvalidLocator