16427: Merge branch 'master'
author Tom Clegg <tom@tomclegg.ca>
Fri, 5 Jun 2020 14:51:17 +0000 (10:51 -0400)
committer Tom Clegg <tom@tomclegg.ca>
Fri, 5 Jun 2020 14:51:17 +0000 (10:51 -0400)
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@tomclegg.ca>

12 files changed:
cmd/arvados-server/cmd.go
lib/config/load.go
lib/undelete/cmd.go [new file with mode: 0644]
lib/undelete/cmd_test.go [new file with mode: 0644]
sdk/go/arvados/blob_signature.go [new file with mode: 0644]
sdk/go/arvados/blob_signature_test.go [new file with mode: 0644]
sdk/go/arvados/keep_service.go
sdk/go/keepclient/perms.go
sdk/go/keepclient/perms_test.go [deleted file]
services/keepstore/handler_test.go
services/keepstore/handlers.go
services/keepstore/volume_test.go

index fcea2223da70d5a174ee74b8281ebd3d20e0b503..1b2de11accefe995511194c5941f25af3ccd35e4 100644 (file)
@@ -15,6 +15,7 @@ import (
        "git.arvados.org/arvados.git/lib/crunchrun"
        "git.arvados.org/arvados.git/lib/dispatchcloud"
        "git.arvados.org/arvados.git/lib/install"
+       "git.arvados.org/arvados.git/lib/undelete"
        "git.arvados.org/arvados.git/services/ws"
 )
 
@@ -33,6 +34,7 @@ var (
                "crunch-run":      crunchrun.Command,
                "dispatch-cloud":  dispatchcloud.Command,
                "install":         install.Command,
+               "undelete":        undelete.Command,
                "ws":              ws.Command,
        })
 )
index 86a8f7df6d2cd4ccfdb68beec66f71dd7204f4cc..be6181bbe9bbc033cd9241cc14a8eca36ed23610 100644 (file)
@@ -64,14 +64,16 @@ func NewLoader(stdin io.Reader, logger logrus.FieldLogger) *Loader {
 //     // ldr.Path == "/tmp/c.yaml"
 func (ldr *Loader) SetupFlags(flagset *flag.FlagSet) {
        flagset.StringVar(&ldr.Path, "config", arvados.DefaultConfigFile, "Site configuration `file` (default may be overridden by setting an ARVADOS_CONFIG environment variable)")
-       flagset.StringVar(&ldr.KeepstorePath, "legacy-keepstore-config", defaultKeepstoreConfigPath, "Legacy keepstore configuration `file`")
-       flagset.StringVar(&ldr.KeepWebPath, "legacy-keepweb-config", defaultKeepWebConfigPath, "Legacy keep-web configuration `file`")
-       flagset.StringVar(&ldr.CrunchDispatchSlurmPath, "legacy-crunch-dispatch-slurm-config", defaultCrunchDispatchSlurmConfigPath, "Legacy crunch-dispatch-slurm configuration `file`")
-       flagset.StringVar(&ldr.WebsocketPath, "legacy-ws-config", defaultWebsocketConfigPath, "Legacy arvados-ws configuration `file`")
-       flagset.StringVar(&ldr.KeepproxyPath, "legacy-keepproxy-config", defaultKeepproxyConfigPath, "Legacy keepproxy configuration `file`")
-       flagset.StringVar(&ldr.GitHttpdPath, "legacy-git-httpd-config", defaultGitHttpdConfigPath, "Legacy arv-git-httpd configuration `file`")
-       flagset.StringVar(&ldr.KeepBalancePath, "legacy-keepbalance-config", defaultKeepBalanceConfigPath, "Legacy keep-balance configuration `file`")
-       flagset.BoolVar(&ldr.SkipLegacy, "skip-legacy", false, "Don't load legacy config files")
+       if !ldr.SkipLegacy {
+               flagset.StringVar(&ldr.KeepstorePath, "legacy-keepstore-config", defaultKeepstoreConfigPath, "Legacy keepstore configuration `file`")
+               flagset.StringVar(&ldr.KeepWebPath, "legacy-keepweb-config", defaultKeepWebConfigPath, "Legacy keep-web configuration `file`")
+               flagset.StringVar(&ldr.CrunchDispatchSlurmPath, "legacy-crunch-dispatch-slurm-config", defaultCrunchDispatchSlurmConfigPath, "Legacy crunch-dispatch-slurm configuration `file`")
+               flagset.StringVar(&ldr.WebsocketPath, "legacy-ws-config", defaultWebsocketConfigPath, "Legacy arvados-ws configuration `file`")
+               flagset.StringVar(&ldr.KeepproxyPath, "legacy-keepproxy-config", defaultKeepproxyConfigPath, "Legacy keepproxy configuration `file`")
+               flagset.StringVar(&ldr.GitHttpdPath, "legacy-git-httpd-config", defaultGitHttpdConfigPath, "Legacy arv-git-httpd configuration `file`")
+               flagset.StringVar(&ldr.KeepBalancePath, "legacy-keepbalance-config", defaultKeepBalanceConfigPath, "Legacy keep-balance configuration `file`")
+               flagset.BoolVar(&ldr.SkipLegacy, "skip-legacy", false, "Don't load legacy config files")
+       }
 }
 
 // MungeLegacyConfigArgs checks args for a -config flag whose argument
diff --git a/lib/undelete/cmd.go b/lib/undelete/cmd.go
new file mode 100644 (file)
index 0000000..cc0ea09
--- /dev/null
@@ -0,0 +1,318 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package undelete
+
+import (
+       "context"
+       "errors"
+       "flag"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "strings"
+       "sync"
+       "time"
+
+       "git.arvados.org/arvados.git/lib/config"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "github.com/sirupsen/logrus"
+)
+
+var Command command
+
+type command struct{}
+
+func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+       var err error
+       logger := ctxlog.New(stderr, "text", "info")
+       defer func() {
+               if err != nil {
+                       logger.WithError(err).Error("fatal")
+               }
+               logger.Info("exiting")
+       }()
+
+       loader := config.NewLoader(stdin, logger)
+       loader.SkipLegacy = true
+
+       flags := flag.NewFlagSet("", flag.ContinueOnError)
+       flags.SetOutput(stderr)
+       flags.Usage = func() {
+               fmt.Fprintf(flags.Output(), `Usage:
+       %s [options ...] /path/to/manifest.txt [...]
+
+       This program recovers deleted collections. Recovery is
+       possible when the collection's manifest is still available and
+       all of its data blocks are still available or recoverable
+       (e.g., garbage collection is not enabled, the blocks are too
+       new for garbage collection, the blocks are referenced by other
+       collections, or the blocks have been trashed but not yet
+       deleted).
+
+       For each provided collection manifest, once all data blocks
+       are recovered/protected from garbage collection, a new
+       collection is saved and its UUID is printed on stdout.
+
+       Exit status will be zero if recovery is successful, i.e., a
+       collection is saved for each provided manifest.
+Options:
+`, prog)
+               flags.PrintDefaults()
+       }
+       loader.SetupFlags(flags)
+       loglevel := flags.String("log-level", "info", "logging level (debug, info, ...)")
+       err = flags.Parse(args)
+       if err == flag.ErrHelp {
+               err = nil
+               return 0
+       } else if err != nil {
+               return 2
+       }
+
+       if len(flags.Args()) == 0 {
+               flags.Usage()
+               return 2
+       }
+
+       lvl, err := logrus.ParseLevel(*loglevel)
+       if err != nil {
+               return 2
+       }
+       logger.SetLevel(lvl)
+
+       cfg, err := loader.Load()
+       if err != nil {
+               return 1
+       }
+       cluster, err := cfg.GetCluster("")
+       if err != nil {
+               return 1
+       }
+       client, err := arvados.NewClientFromConfig(cluster)
+       if err != nil {
+               return 1
+       }
+       client.AuthToken = cluster.SystemRootToken
+       und := undeleter{
+               client:  client,
+               cluster: cluster,
+               logger:  logger,
+       }
+
+       exitcode := 0
+       for _, src := range flags.Args() {
+               logger := logger.WithField("src", src)
+               if len(src) == 27 && src[5:12] == "-57u5n-" {
+                       logger.Error("log entry lookup not implemented")
+                       exitcode = 1
+                       continue
+               } else {
+                       mtxt, err := ioutil.ReadFile(src)
+                       if err != nil {
+                               logger.WithError(err).Error("error loading manifest data")
+                               exitcode = 1
+                               continue
+                       }
+                       uuid, err := und.RecoverManifest(string(mtxt))
+                       if err != nil {
+                               logger.WithError(err).Error("recovery failed")
+                               exitcode = 1
+                               continue
+                       }
+                       logger.WithField("UUID", uuid).Info("recovery succeeded")
+                       fmt.Fprintln(stdout, uuid)
+               }
+       }
+       return exitcode
+}
+
+type undeleter struct {
+       client  *arvados.Client
+       cluster *arvados.Cluster
+       logger  logrus.FieldLogger
+}
+
+var errNotFound = errors.New("not found")
+
+// Return the timestamp of the newest copy of blk on svc. Return
+// errNotFound if blk is not on svc at all, or another error if the
+// index cannot be retrieved.
+func (und undeleter) newestMtime(logger logrus.FieldLogger, blk string, svc arvados.KeepService) (time.Time, error) {
+       found, err := svc.Index(und.client, blk)
+       if err != nil {
+               logger.WithError(err).Warn("error getting index")
+               return time.Time{}, err
+       } else if len(found) == 0 {
+               return time.Time{}, errNotFound
+       }
+       var latest time.Time
+       for _, ent := range found {
+               t := time.Unix(0, ent.Mtime)
+               if t.After(latest) {
+                       latest = t
+               }
+       }
+       logger.WithField("latest", latest).Debug("found")
+       return latest, nil
+}
+
+var errTouchIneffective = errors.New("(BUG?) touch succeeded but had no effect -- reported timestamp is still too old")
+
+// Ensure the given block exists on the given server and won't be
+// eligible for trashing until after our chosen deadline (blobsigexp).
+// Returns an error if the block doesn't exist on the given server, or
+// has an old timestamp and can't be updated.  Reports errors via
+// logger.
+//
+// After we decide a block is "safe" (whether or not we had to untrash
+// it), keep-balance might notice that it's currently unreferenced and
+// decide to trash it, all before our recovered collection gets
+// saved. But if the block's timestamp is less than blobsigttl old,
+// keepstore will refuse to trash it even if told to by keep-balance.
+func (und undeleter) ensureSafe(ctx context.Context, logger logrus.FieldLogger, blk string, svc arvados.KeepService, blobsigttl time.Duration, blobsigexp time.Time) error {
+       if latest, err := und.newestMtime(logger, blk, svc); err != nil {
+               return err
+       } else if latest.Add(blobsigttl).After(blobsigexp) {
+               return nil
+       }
+       if err := svc.Touch(ctx, und.client, blk); err != nil {
+               return fmt.Errorf("error updating timestamp: %s", err)
+       }
+       logger.Debug("updated timestamp")
+       if latest, err := und.newestMtime(logger, blk, svc); err == errNotFound {
+               return fmt.Errorf("(BUG?) touch succeeded, but then block did not appear in index")
+       } else if err != nil {
+               return err
+       } else if latest.Add(blobsigttl).After(blobsigexp) {
+               return nil
+       } else {
+               return errTouchIneffective
+       }
+}
+
+// Untrash and update GC timestamps (as needed) on blocks referenced
+// by the given manifest, save a new collection and return the new
+// collection's UUID.
+func (und undeleter) RecoverManifest(mtxt string) (string, error) {
+       ctx, cancel := context.WithCancel(context.Background())
+       defer cancel()
+
+       coll := arvados.Collection{ManifestText: mtxt}
+       blks, err := coll.SizedDigests()
+       if err != nil {
+               return "", err
+       }
+       todo := make(chan int, len(blks))
+       for idx := range blks {
+               todo <- idx
+       }
+       go close(todo)
+
+       var services []arvados.KeepService
+       err = und.client.EachKeepService(func(svc arvados.KeepService) error {
+               if svc.ServiceType == "proxy" {
+                       und.logger.WithField("service", svc).Debug("ignore proxy service")
+               } else {
+                       services = append(services, svc)
+               }
+               return nil
+       })
+       if err != nil {
+               return "", fmt.Errorf("error getting list of keep services: %s", err)
+       }
+       und.logger.WithField("services", services).Debug("got list of services")
+
+       // blobsigexp is our deadline for saving the rescued
+       // collection. This must be less than BlobSigningTTL
+       // (otherwise our rescued blocks could be garbage collected
+       // again before we protect them by saving the collection) but
+       // the exact value is somewhat arbitrary. If it's too soon, it
+       // will arrive before we're ready to save, and save will
+       // fail. If it's too late, we'll needlessly update timestamps
+       // on some blocks that were recently written/touched (e.g., by
+       // a previous attempt to rescue this same collection) and
+       // would have lived long enough anyway if left alone.
+       // BlobSigningTTL/2 (typically around 1 week) is much longer
+       // than we need to recover even a very large collection.
+       blobsigttl := und.cluster.Collections.BlobSigningTTL.Duration()
+       blobsigexp := time.Now().Add(blobsigttl / 2)
+       und.logger.WithField("blobsigexp", blobsigexp).Debug("chose save deadline")
+
+       // We'll start a number of threads, each working on
+       // checking/recovering one block at a time. The threads
+       // themselves don't need much CPU/memory, but to avoid hitting
+       // limits on keepstore connections, backend storage bandwidth,
+       // etc., we limit concurrency to 2 per keepstore node.
+       workerThreads := 2 * len(services)
+
+       blkFound := make([]bool, len(blks))
+       var wg sync.WaitGroup
+       for i := 0; i < workerThreads; i++ {
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+               nextblk:
+                       for idx := range todo {
+                               blk := strings.SplitN(string(blks[idx]), "+", 2)[0]
+                               logger := und.logger.WithField("block", blk)
+                               for _, untrashing := range []bool{false, true} {
+                                       for _, svc := range services {
+                                               logger := logger.WithField("service", fmt.Sprintf("%s:%d", svc.ServiceHost, svc.ServicePort))
+                                               if untrashing {
+                                                       if err := svc.Untrash(ctx, und.client, blk); err != nil {
+                                                               logger.WithError(err).Debug("untrash failed")
+                                                               continue
+                                                       }
+                                                       logger.Info("untrashed")
+                                               }
+                                               err := und.ensureSafe(ctx, logger, blk, svc, blobsigttl, blobsigexp)
+                                               if err == errNotFound {
+                                                       logger.Debug(err)
+                                               } else if err != nil {
+                                                       logger.Error(err)
+                                               } else {
+                                                       blkFound[idx] = true
+                                                       continue nextblk
+                                               }
+                                       }
+                               }
+                               logger.Debug("unrecoverable")
+                       }
+               }()
+       }
+       wg.Wait()
+
+       var have, havenot int
+       for _, ok := range blkFound {
+               if ok {
+                       have++
+               } else {
+                       havenot++
+               }
+       }
+       if havenot > 0 {
+               if have > 0 {
+                       und.logger.Warn("partial recovery is not implemented")
+               }
+               return "", fmt.Errorf("unable to recover %d of %d blocks", havenot, have+havenot)
+       }
+
+       if und.cluster.Collections.BlobSigning {
+               key := []byte(und.cluster.Collections.BlobSigningKey)
+               coll.ManifestText = arvados.SignManifest(coll.ManifestText, und.client.AuthToken, blobsigexp, blobsigttl, key)
+       }
+       und.logger.WithField("manifest", coll.ManifestText).Debug("updated blob signatures in manifest")
+       err = und.client.RequestAndDecodeContext(ctx, &coll, "POST", "arvados/v1/collections", nil, map[string]interface{}{
+               "collection": map[string]interface{}{
+                       "manifest_text": coll.ManifestText,
+               },
+       })
+       if err != nil {
+               return "", fmt.Errorf("error saving new collection: %s", err)
+       }
+       und.logger.WithField("UUID", coll.UUID).Debug("created new collection")
+       return coll.UUID, nil
+}
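
For reference, a minimal sketch (not part of this commit) of driving the new subcommand in-process, the same way the test suite below does; the manifest path is a placeholder, and the cluster configuration is loaded from the usual site config location:

package main

import (
	"bytes"
	"fmt"
	"os"

	"git.arvados.org/arvados.git/lib/undelete"
)

func main() {
	// Hypothetical manifest path. The command loads the cluster config
	// (ARVADOS_CONFIG or the default site config file) and authenticates
	// with the cluster's SystemRootToken.
	args := []string{"-log-level=debug", "/tmp/deleted-collection-manifest.txt"}
	var stdout, stderr bytes.Buffer
	exitcode := undelete.Command.RunCommand("undelete", args, &bytes.Buffer{}, &stdout, &stderr)
	os.Stderr.WriteString(stderr.String())
	fmt.Print(stdout.String()) // UUID of each recovered collection, one per line
	os.Exit(exitcode)
}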
diff --git a/lib/undelete/cmd_test.go b/lib/undelete/cmd_test.go
new file mode 100644 (file)
index 0000000..a5edaf9
--- /dev/null
@@ -0,0 +1,117 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package undelete
+
+import (
+       "bytes"
+       "encoding/json"
+       "io/ioutil"
+       "os"
+       "testing"
+       "time"
+
+       "git.arvados.org/arvados.git/lib/config"
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+       check.TestingT(t)
+}
+
+var _ = check.Suite(&Suite{})
+
+type Suite struct{}
+
+func (*Suite) SetUpSuite(c *check.C) {
+       arvadostest.StartAPI()
+       arvadostest.StartKeep(2, true)
+}
+
+func (*Suite) TestUnrecoverableBlock(c *check.C) {
+       tmp := c.MkDir()
+       mfile := tmp + "/manifest"
+       ioutil.WriteFile(mfile, []byte(". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+410 0:410:Gone\n"), 0777)
+       var stdout, stderr bytes.Buffer
+       exitcode := Command.RunCommand("undelete.test", []string{"-log-level=debug", mfile}, &bytes.Buffer{}, &stdout, &stderr)
+       c.Check(exitcode, check.Equals, 1)
+       c.Check(stdout.String(), check.Equals, "")
+       c.Log(stderr.String())
+       c.Check(stderr.String(), check.Matches, `(?ms).*msg="not found" block=aaaaa.*`)
+       c.Check(stderr.String(), check.Matches, `(?ms).*msg="untrash failed" block=aaaaa.*`)
+       c.Check(stderr.String(), check.Matches, `(?ms).*msg=unrecoverable block=aaaaa.*`)
+       c.Check(stderr.String(), check.Matches, `(?ms).*msg="recovery failed".*`)
+}
+
+func (*Suite) TestUntrashAndTouchBlock(c *check.C) {
+       tmp := c.MkDir()
+       mfile := tmp + "/manifest"
+       ioutil.WriteFile(mfile, []byte(". dcd0348cb2532ee90c99f1b846efaee7+13 0:13:test.txt\n"), 0777)
+
+       logger := ctxlog.TestLogger(c)
+       loader := config.NewLoader(&bytes.Buffer{}, logger)
+       cfg, err := loader.Load()
+       c.Assert(err, check.IsNil)
+       cluster, err := cfg.GetCluster("")
+       c.Assert(err, check.IsNil)
+       var datadirs []string
+       for _, v := range cluster.Volumes {
+               var params struct {
+                       Root string
+               }
+               err := json.Unmarshal(v.DriverParameters, &params)
+               c.Assert(err, check.IsNil)
+               if params.Root != "" {
+                       datadirs = append(datadirs, params.Root)
+                       err := os.Remove(params.Root + "/dcd/dcd0348cb2532ee90c99f1b846efaee7")
+                       if err != nil && !os.IsNotExist(err) {
+                               c.Error(err)
+                       }
+               }
+       }
+       c.Logf("keepstore datadirs are %q", datadirs)
+
+       // Currently StartKeep(2, true) uses dirs called "keep0" and
+       // "keep1" so we could just put our fake trashed file in keep0
+       // ... but we don't want to rely on arvadostest's
+       // implementation details, so we put a trashed file in every
+       // dir that keepstore might be using.
+       for _, datadir := range datadirs {
+               if fi, err := os.Stat(datadir); err != nil || !fi.IsDir() {
+                       continue
+               }
+               c.Logf("placing backdated trashed block in datadir %q", datadir)
+               trashfile := datadir + "/dcd/dcd0348cb2532ee90c99f1b846efaee7.trash.999999999"
+               os.Mkdir(datadir+"/dcd", 0777)
+               err = ioutil.WriteFile(trashfile, []byte("undelete test"), 0777)
+               c.Assert(err, check.IsNil)
+               t := time.Now().Add(-time.Hour * 24 * 365)
+               err = os.Chtimes(trashfile, t, t)
+       }
+
+       var stdout, stderr bytes.Buffer
+       exitcode := Command.RunCommand("undelete.test", []string{"-log-level=debug", mfile}, &bytes.Buffer{}, &stdout, &stderr)
+       c.Check(exitcode, check.Equals, 0)
+       c.Check(stdout.String(), check.Matches, `zzzzz-4zz18-.{15}\n`)
+       c.Log(stderr.String())
+       c.Check(stderr.String(), check.Matches, `(?ms).*msg=untrashed block=dcd0348.*`)
+       c.Check(stderr.String(), check.Matches, `(?ms).*msg="updated timestamp" block=dcd0348.*`)
+
+       found := false
+       for _, datadir := range datadirs {
+               buf, err := ioutil.ReadFile(datadir + "/dcd/dcd0348cb2532ee90c99f1b846efaee7")
+               if err == nil {
+                       found = true
+                       c.Check(buf, check.DeepEquals, []byte("undelete test"))
+                       fi, err := os.Stat(datadir + "/dcd/dcd0348cb2532ee90c99f1b846efaee7")
+                       if c.Check(err, check.IsNil) {
+                               c.Logf("recovered block's modtime is %s", fi.ModTime())
+                               c.Check(time.Now().Sub(fi.ModTime()) < time.Hour, check.Equals, true)
+                       }
+               }
+       }
+       c.Check(found, check.Equals, true)
+}
diff --git a/sdk/go/arvados/blob_signature.go b/sdk/go/arvados/blob_signature.go
new file mode 100644 (file)
index 0000000..1329395
--- /dev/null
@@ -0,0 +1,126 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Generate and verify permission signatures for Keep locators.
+//
+// See https://dev.arvados.org/projects/arvados/wiki/Keep_locator_format
+
+package arvados
+
+import (
+       "crypto/hmac"
+       "crypto/sha1"
+       "errors"
+       "fmt"
+       "regexp"
+       "strconv"
+       "strings"
+       "time"
+)
+
+var (
+       // ErrSignatureExpired - a signature was rejected because the
+       // expiry time has passed.
+       ErrSignatureExpired = errors.New("Signature expired")
+       // ErrSignatureInvalid - a signature was rejected because it
+       // was badly formatted or did not match the given secret key.
+       ErrSignatureInvalid = errors.New("Invalid signature")
+       // ErrSignatureMissing - the given locator does not have a
+       // signature hint.
+       ErrSignatureMissing = errors.New("Missing signature")
+)
+
+// makePermSignature generates a SHA-1 HMAC digest for the given blob,
+// token, expiry, and site secret.
+func makePermSignature(blobHash, apiToken, expiry, blobSignatureTTL string, permissionSecret []byte) string {
+       hmac := hmac.New(sha1.New, permissionSecret)
+       hmac.Write([]byte(blobHash))
+       hmac.Write([]byte("@"))
+       hmac.Write([]byte(apiToken))
+       hmac.Write([]byte("@"))
+       hmac.Write([]byte(expiry))
+       hmac.Write([]byte("@"))
+       hmac.Write([]byte(blobSignatureTTL))
+       digest := hmac.Sum(nil)
+       return fmt.Sprintf("%x", digest)
+}
+
+var (
+       mBlkRe      = regexp.MustCompile(`^[0-9a-f]{32}.*`)
+       mPermHintRe = regexp.MustCompile(`\+A[^+]*`)
+)
+
+// SignManifest signs all locators in the given manifest, discarding
+// any existing signatures.
+func SignManifest(manifest string, apiToken string, expiry time.Time, ttl time.Duration, permissionSecret []byte) string {
+       return regexp.MustCompile(`\S+`).ReplaceAllStringFunc(manifest, func(tok string) string {
+               if mBlkRe.MatchString(tok) {
+                       return SignLocator(mPermHintRe.ReplaceAllString(tok, ""), apiToken, expiry, ttl, permissionSecret)
+               } else {
+                       return tok
+               }
+       })
+}
+
+// SignLocator returns blobLocator with a permission signature
+// added. If either permissionSecret or apiToken is empty, blobLocator
+// is returned untouched.
+//
+// This function is intended to be used by system components and admin
+// utilities: userland programs do not know the permissionSecret.
+func SignLocator(blobLocator, apiToken string, expiry time.Time, blobSignatureTTL time.Duration, permissionSecret []byte) string {
+       if len(permissionSecret) == 0 || apiToken == "" {
+               return blobLocator
+       }
+       // Strip off all hints: only the hash is used to sign.
+       blobHash := strings.Split(blobLocator, "+")[0]
+       timestampHex := fmt.Sprintf("%08x", expiry.Unix())
+       blobSignatureTTLHex := strconv.FormatInt(int64(blobSignatureTTL.Seconds()), 16)
+       return blobLocator +
+               "+A" + makePermSignature(blobHash, apiToken, timestampHex, blobSignatureTTLHex, permissionSecret) +
+               "@" + timestampHex
+}
+
+var SignedLocatorRe = regexp.MustCompile(
+       //1                 2          34                         5   6                  7                 89
+       `^([[:xdigit:]]{32})(\+[0-9]+)?((\+[B-Z][A-Za-z0-9@_-]*)*)(\+A([[:xdigit:]]{40})@([[:xdigit:]]{8}))((\+[B-Z][A-Za-z0-9@_-]*)*)$`)
+
+// VerifySignature returns nil if the signature on the signedLocator
+// can be verified using the given apiToken. Otherwise it returns
+// ErrSignatureExpired (if the signature's expiry time has passed,
+// which is something the client could have figured out
+// independently), ErrSignatureMissing (if there is no signature hint
+// at all), or ErrSignatureInvalid (if the signature is present but
+// badly formatted or incorrect).
+//
+// This function is intended to be used by system components and admin
+// utilities: userland programs do not know the permissionSecret.
+func VerifySignature(signedLocator, apiToken string, blobSignatureTTL time.Duration, permissionSecret []byte) error {
+       matches := SignedLocatorRe.FindStringSubmatch(signedLocator)
+       if matches == nil {
+               return ErrSignatureMissing
+       }
+       blobHash := matches[1]
+       signatureHex := matches[6]
+       expiryHex := matches[7]
+       if expiryTime, err := parseHexTimestamp(expiryHex); err != nil {
+               return ErrSignatureInvalid
+       } else if expiryTime.Before(time.Now()) {
+               return ErrSignatureExpired
+       }
+       blobSignatureTTLHex := strconv.FormatInt(int64(blobSignatureTTL.Seconds()), 16)
+       if signatureHex != makePermSignature(blobHash, apiToken, expiryHex, blobSignatureTTLHex, permissionSecret) {
+               return ErrSignatureInvalid
+       }
+       return nil
+}
+
+func parseHexTimestamp(timestampHex string) (ts time.Time, err error) {
+       if tsInt, e := strconv.ParseInt(timestampHex, 16, 0); e == nil {
+               ts = time.Unix(tsInt, 0)
+       } else {
+               err = e
+       }
+       return ts, err
+}
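
A minimal sketch (not part of this commit) of calling the relocated signing helpers from the arvados package; the key, token, and manifest below are placeholders chosen for illustration:

package main

import (
	"fmt"
	"time"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	// Placeholder secret, token, and manifest.
	key := []byte("example-blob-signing-key")
	token := "example-api-token"
	ttl := 2 * 7 * 24 * time.Hour // e.g., a two-week BlobSigningTTL
	expiry := time.Now().Add(ttl / 2)

	// Sign a single locator (appends a +A...@... hint)...
	signed := arvados.SignLocator("acbd18db4cc2f85cedef654fccc4a4d8+3", token, expiry, ttl, key)
	fmt.Println(signed)

	// ...or every block locator in a manifest, replacing any existing +A hints.
	manifest := ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n"
	fmt.Print(arvados.SignManifest(manifest, token, expiry, ttl, key))

	// VerifySignature returns nil on success, or ErrSignatureExpired,
	// ErrSignatureInvalid, or ErrSignatureMissing.
	if err := arvados.VerifySignature(signed, token, ttl, key); err != nil {
		fmt.Println("verify:", err)
	}
}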
diff --git a/sdk/go/arvados/blob_signature_test.go b/sdk/go/arvados/blob_signature_test.go
new file mode 100644 (file)
index 0000000..847f9a8
--- /dev/null
@@ -0,0 +1,88 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "time"
+
+       check "gopkg.in/check.v1"
+)
+
+const (
+       knownHash    = "acbd18db4cc2f85cedef654fccc4a4d8"
+       knownLocator = knownHash + "+3"
+       knownToken   = "hocfupkn2pjhrpgp2vxv8rsku7tvtx49arbc9s4bvu7p7wxqvk"
+       knownKey     = "13u9fkuccnboeewr0ne3mvapk28epf68a3bhj9q8sb4l6e4e5mkk" +
+               "p6nhj2mmpscgu1zze5h5enydxfe3j215024u16ij4hjaiqs5u4pzsl3nczmaoxnc" +
+               "ljkm4875xqn4xv058koz3vkptmzhyheiy6wzevzjmdvxhvcqsvr5abhl15c2d4o4" +
+               "jhl0s91lojy1mtrzqqvprqcverls0xvy9vai9t1l1lvvazpuadafm71jl4mrwq2y" +
+               "gokee3eamvjy8qq1fvy238838enjmy5wzy2md7yvsitp5vztft6j4q866efym7e6" +
+               "vu5wm9fpnwjyxfldw3vbo01mgjs75rgo7qioh8z8ij7jpyp8508okhgbbex3ceei" +
+               "786u5rw2a9gx743dj3fgq2irk"
+       knownSignature     = "89118b78732c33104a4d6231e8b5a5fa1e4301e3"
+       knownTimestamp     = "7fffffff"
+       knownSigHint       = "+A" + knownSignature + "@" + knownTimestamp
+       knownSignedLocator = knownLocator + knownSigHint
+       blobSignatureTTL   = 1209600 * time.Second
+)
+
+var _ = check.Suite(&BlobSignatureSuite{})
+
+type BlobSignatureSuite struct{}
+
+func (s *BlobSignatureSuite) TestSignLocator(c *check.C) {
+       ts, err := parseHexTimestamp(knownTimestamp)
+       c.Check(err, check.IsNil)
+       c.Check(SignLocator(knownLocator, knownToken, ts, blobSignatureTTL, []byte(knownKey)), check.Equals, knownSignedLocator)
+}
+
+func (s *BlobSignatureSuite) TestVerifySignature(c *check.C) {
+       c.Check(VerifySignature(knownSignedLocator, knownToken, blobSignatureTTL, []byte(knownKey)), check.IsNil)
+}
+
+func (s *BlobSignatureSuite) TestVerifySignatureExtraHints(c *check.C) {
+       // handle hint before permission signature
+       c.Check(VerifySignature(knownLocator+"+K@xyzzy"+knownSigHint, knownToken, blobSignatureTTL, []byte(knownKey)), check.IsNil)
+
+       // handle hint after permission signature
+       c.Check(VerifySignature(knownLocator+knownSigHint+"+Zfoo", knownToken, blobSignatureTTL, []byte(knownKey)), check.IsNil)
+
+       // handle hints around permission signature
+       c.Check(VerifySignature(knownLocator+"+K@xyzzy"+knownSigHint+"+Zfoo", knownToken, blobSignatureTTL, []byte(knownKey)), check.IsNil)
+}
+
+// The size hint on the locator string should not affect signature
+// validation.
+func (s *BlobSignatureSuite) TestVerifySignatureWrongSize(c *check.C) {
+       // handle incorrect size hint
+       c.Check(VerifySignature(knownHash+"+999999"+knownSigHint, knownToken, blobSignatureTTL, []byte(knownKey)), check.IsNil)
+
+       // handle missing size hint
+       c.Check(VerifySignature(knownHash+knownSigHint, knownToken, blobSignatureTTL, []byte(knownKey)), check.IsNil)
+}
+
+func (s *BlobSignatureSuite) TestVerifySignatureBadSig(c *check.C) {
+       badLocator := knownLocator + "+Aaaaaaaaaaaaaaaa@" + knownTimestamp
+       c.Check(VerifySignature(badLocator, knownToken, blobSignatureTTL, []byte(knownKey)), check.Equals, ErrSignatureMissing)
+}
+
+func (s *BlobSignatureSuite) TestVerifySignatureBadTimestamp(c *check.C) {
+       badLocator := knownLocator + "+A" + knownSignature + "@OOOOOOOl"
+       c.Check(VerifySignature(badLocator, knownToken, blobSignatureTTL, []byte(knownKey)), check.Equals, ErrSignatureMissing)
+}
+
+func (s *BlobSignatureSuite) TestVerifySignatureBadSecret(c *check.C) {
+       c.Check(VerifySignature(knownSignedLocator, knownToken, blobSignatureTTL, []byte("00000000000000000000")), check.Equals, ErrSignatureInvalid)
+}
+
+func (s *BlobSignatureSuite) TestVerifySignatureBadToken(c *check.C) {
+       c.Check(VerifySignature(knownSignedLocator, "00000000", blobSignatureTTL, []byte(knownKey)), check.Equals, ErrSignatureInvalid)
+}
+
+func (s *BlobSignatureSuite) TestVerifySignatureExpired(c *check.C) {
+       yesterday := time.Now().AddDate(0, 0, -1)
+       expiredLocator := SignLocator(knownHash, knownToken, yesterday, blobSignatureTTL, []byte(knownKey))
+       c.Check(VerifySignature(expiredLocator, knownToken, blobSignatureTTL, []byte(knownKey)), check.Equals, ErrSignatureExpired)
+}
index 97a62fa7bb3933b89e83e428fc4da39de7453fcd..3af7479202efb46df838e48246ccf9c2f0b5b100 100644 (file)
@@ -6,7 +6,9 @@ package arvados
 
 import (
        "bufio"
+       "context"
        "fmt"
+       "io/ioutil"
        "net/http"
        "strconv"
        "strings"
@@ -102,6 +104,42 @@ func (s *KeepService) Mounts(c *Client) ([]KeepMount, error) {
        return mounts, nil
 }
 
+// Touch updates the timestamp on the given block.
+func (s *KeepService) Touch(ctx context.Context, c *Client, blk string) error {
+       req, err := http.NewRequest("TOUCH", s.url(blk), nil)
+       if err != nil {
+               return err
+       }
+       resp, err := c.Do(req.WithContext(ctx))
+       if err != nil {
+               return err
+       }
+       defer resp.Body.Close()
+       if resp.StatusCode != http.StatusOK {
+               body, _ := ioutil.ReadAll(resp.Body)
+               return fmt.Errorf("%s %s: %s", resp.Proto, resp.Status, body)
+       }
+       return nil
+}
+
+// Untrash moves/copies the given block out of trash.
+func (s *KeepService) Untrash(ctx context.Context, c *Client, blk string) error {
+       req, err := http.NewRequest("PUT", s.url("untrash/"+blk), nil)
+       if err != nil {
+               return err
+       }
+       resp, err := c.Do(req.WithContext(ctx))
+       if err != nil {
+               return err
+       }
+       defer resp.Body.Close()
+       if resp.StatusCode != http.StatusOK {
+               body, _ := ioutil.ReadAll(resp.Body)
+               return fmt.Errorf("%s %s: %s", resp.Proto, resp.Status, body)
+       }
+       return nil
+}
+
 // IndexMount returns an unsorted list of blocks at the given mount point.
 func (s *KeepService) IndexMount(c *Client, mountUUID string, prefix string) ([]KeepServiceIndexEntry, error) {
        return s.index(c, s.url("mounts/"+mountUUID+"/blocks?prefix="+prefix))
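
A sketch (not part of this commit) of a privileged caller using the new KeepService.Touch and KeepService.Untrash methods; the environment-based client setup and the block hash are assumptions for illustration, and a real recovery (as in lib/undelete above) also compares index timestamps against the signing TTL before and after touching:

package main

import (
	"context"
	"log"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	// Assumes ARVADOS_API_HOST / ARVADOS_API_TOKEN are set; the token must
	// be the cluster's SystemRootToken for TOUCH and untrash to be allowed.
	client := arvados.NewClientFromEnv()
	ctx := context.Background()
	blk := "acbd18db4cc2f85cedef654fccc4a4d8" // placeholder block hash

	recovered := false
	err := client.EachKeepService(func(svc arvados.KeepService) error {
		if svc.ServiceType == "proxy" {
			return nil // skip keepproxy, as the undelete command does
		}
		if err := svc.Untrash(ctx, client, blk); err != nil {
			// Not fatal: the block may simply not be trashed on this server.
			log.Printf("untrash %s on %s:%d: %s", blk, svc.ServiceHost, svc.ServicePort, err)
		}
		// Refresh the timestamp so the block stays safe from trashing.
		if err := svc.Touch(ctx, client, blk); err != nil {
			log.Printf("touch %s on %s:%d: %s", blk, svc.ServiceHost, svc.ServicePort, err)
			return nil // try the next service
		}
		recovered = true
		return nil
	})
	if err != nil {
		log.Fatal(err)
	} else if !recovered {
		log.Fatalf("block %s was not recovered on any keep service", blk)
	}
}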
index a77983322d618158f0eb8ff6ba6af47814bb894d..23ca7d2f2b43cf778ce16c22f34fa20bd524f091 100644 (file)
 //
 // SPDX-License-Identifier: Apache-2.0
 
-// Generate and verify permission signatures for Keep locators.
-//
-// See https://dev.arvados.org/projects/arvados/wiki/Keep_locator_format
-
 package keepclient
 
-import (
-       "crypto/hmac"
-       "crypto/sha1"
-       "errors"
-       "fmt"
-       "regexp"
-       "strconv"
-       "strings"
-       "time"
-)
+import "git.arvados.org/arvados.git/sdk/go/arvados"
 
 var (
-       // ErrSignatureExpired - a signature was rejected because the
-       // expiry time has passed.
-       ErrSignatureExpired = errors.New("Signature expired")
-       // ErrSignatureInvalid - a signature was rejected because it
-       // was badly formatted or did not match the given secret key.
-       ErrSignatureInvalid = errors.New("Invalid signature")
-       // ErrSignatureMissing - the given locator does not have a
-       // signature hint.
-       ErrSignatureMissing = errors.New("Missing signature")
+       ErrSignatureExpired = arvados.ErrSignatureExpired
+       ErrSignatureInvalid = arvados.ErrSignatureInvalid
+       ErrSignatureMissing = arvados.ErrSignatureMissing
+       SignLocator         = arvados.SignLocator
+       SignedLocatorRe     = arvados.SignedLocatorRe
+       VerifySignature     = arvados.VerifySignature
 )
-
-// makePermSignature generates a SHA-1 HMAC digest for the given blob,
-// token, expiry, and site secret.
-func makePermSignature(blobHash, apiToken, expiry, blobSignatureTTL string, permissionSecret []byte) string {
-       hmac := hmac.New(sha1.New, permissionSecret)
-       hmac.Write([]byte(blobHash))
-       hmac.Write([]byte("@"))
-       hmac.Write([]byte(apiToken))
-       hmac.Write([]byte("@"))
-       hmac.Write([]byte(expiry))
-       hmac.Write([]byte("@"))
-       hmac.Write([]byte(blobSignatureTTL))
-       digest := hmac.Sum(nil)
-       return fmt.Sprintf("%x", digest)
-}
-
-// SignLocator returns blobLocator with a permission signature
-// added. If either permissionSecret or apiToken is empty, blobLocator
-// is returned untouched.
-//
-// This function is intended to be used by system components and admin
-// utilities: userland programs do not know the permissionSecret.
-func SignLocator(blobLocator, apiToken string, expiry time.Time, blobSignatureTTL time.Duration, permissionSecret []byte) string {
-       if len(permissionSecret) == 0 || apiToken == "" {
-               return blobLocator
-       }
-       // Strip off all hints: only the hash is used to sign.
-       blobHash := strings.Split(blobLocator, "+")[0]
-       timestampHex := fmt.Sprintf("%08x", expiry.Unix())
-       blobSignatureTTLHex := strconv.FormatInt(int64(blobSignatureTTL.Seconds()), 16)
-       return blobLocator +
-               "+A" + makePermSignature(blobHash, apiToken, timestampHex, blobSignatureTTLHex, permissionSecret) +
-               "@" + timestampHex
-}
-
-var SignedLocatorRe = regexp.MustCompile(
-       //1                 2          34                         5   6                  7                 89
-       `^([[:xdigit:]]{32})(\+[0-9]+)?((\+[B-Z][A-Za-z0-9@_-]*)*)(\+A([[:xdigit:]]{40})@([[:xdigit:]]{8}))((\+[B-Z][A-Za-z0-9@_-]*)*)$`)
-
-// VerifySignature returns nil if the signature on the signedLocator
-// can be verified using the given apiToken. Otherwise it returns
-// ErrSignatureExpired (if the signature's expiry time has passed,
-// which is something the client could have figured out
-// independently), ErrSignatureMissing (if there is no signature hint
-// at all), or ErrSignatureInvalid (if the signature is present but
-// badly formatted or incorrect).
-//
-// This function is intended to be used by system components and admin
-// utilities: userland programs do not know the permissionSecret.
-func VerifySignature(signedLocator, apiToken string, blobSignatureTTL time.Duration, permissionSecret []byte) error {
-       matches := SignedLocatorRe.FindStringSubmatch(signedLocator)
-       if matches == nil {
-               return ErrSignatureMissing
-       }
-       blobHash := matches[1]
-       signatureHex := matches[6]
-       expiryHex := matches[7]
-       if expiryTime, err := parseHexTimestamp(expiryHex); err != nil {
-               return ErrSignatureInvalid
-       } else if expiryTime.Before(time.Now()) {
-               return ErrSignatureExpired
-       }
-       blobSignatureTTLHex := strconv.FormatInt(int64(blobSignatureTTL.Seconds()), 16)
-       if signatureHex != makePermSignature(blobHash, apiToken, expiryHex, blobSignatureTTLHex, permissionSecret) {
-               return ErrSignatureInvalid
-       }
-       return nil
-}
-
-func parseHexTimestamp(timestampHex string) (ts time.Time, err error) {
-       if tsInt, e := strconv.ParseInt(timestampHex, 16, 0); e == nil {
-               ts = time.Unix(tsInt, 0)
-       } else {
-               err = e
-       }
-       return ts, err
-}
diff --git a/sdk/go/keepclient/perms_test.go b/sdk/go/keepclient/perms_test.go
deleted file mode 100644 (file)
index f8107f4..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: Apache-2.0
-
-package keepclient
-
-import (
-       "testing"
-       "time"
-)
-
-const (
-       knownHash    = "acbd18db4cc2f85cedef654fccc4a4d8"
-       knownLocator = knownHash + "+3"
-       knownToken   = "hocfupkn2pjhrpgp2vxv8rsku7tvtx49arbc9s4bvu7p7wxqvk"
-       knownKey     = "13u9fkuccnboeewr0ne3mvapk28epf68a3bhj9q8sb4l6e4e5mkk" +
-               "p6nhj2mmpscgu1zze5h5enydxfe3j215024u16ij4hjaiqs5u4pzsl3nczmaoxnc" +
-               "ljkm4875xqn4xv058koz3vkptmzhyheiy6wzevzjmdvxhvcqsvr5abhl15c2d4o4" +
-               "jhl0s91lojy1mtrzqqvprqcverls0xvy9vai9t1l1lvvazpuadafm71jl4mrwq2y" +
-               "gokee3eamvjy8qq1fvy238838enjmy5wzy2md7yvsitp5vztft6j4q866efym7e6" +
-               "vu5wm9fpnwjyxfldw3vbo01mgjs75rgo7qioh8z8ij7jpyp8508okhgbbex3ceei" +
-               "786u5rw2a9gx743dj3fgq2irk"
-       knownSignature     = "89118b78732c33104a4d6231e8b5a5fa1e4301e3"
-       knownTimestamp     = "7fffffff"
-       knownSigHint       = "+A" + knownSignature + "@" + knownTimestamp
-       knownSignedLocator = knownLocator + knownSigHint
-       blobSignatureTTL   = 1209600 * time.Second
-)
-
-func TestSignLocator(t *testing.T) {
-       if ts, err := parseHexTimestamp(knownTimestamp); err != nil {
-               t.Errorf("bad knownTimestamp %s", knownTimestamp)
-       } else {
-               if knownSignedLocator != SignLocator(knownLocator, knownToken, ts, blobSignatureTTL, []byte(knownKey)) {
-                       t.Fail()
-               }
-       }
-}
-
-func TestVerifySignature(t *testing.T) {
-       if VerifySignature(knownSignedLocator, knownToken, blobSignatureTTL, []byte(knownKey)) != nil {
-               t.Fail()
-       }
-}
-
-func TestVerifySignatureExtraHints(t *testing.T) {
-       if VerifySignature(knownLocator+"+K@xyzzy"+knownSigHint, knownToken, blobSignatureTTL, []byte(knownKey)) != nil {
-               t.Fatal("Verify cannot handle hint before permission signature")
-       }
-
-       if VerifySignature(knownLocator+knownSigHint+"+Zfoo", knownToken, blobSignatureTTL, []byte(knownKey)) != nil {
-               t.Fatal("Verify cannot handle hint after permission signature")
-       }
-
-       if VerifySignature(knownLocator+"+K@xyzzy"+knownSigHint+"+Zfoo", knownToken, blobSignatureTTL, []byte(knownKey)) != nil {
-               t.Fatal("Verify cannot handle hints around permission signature")
-       }
-}
-
-// The size hint on the locator string should not affect signature validation.
-func TestVerifySignatureWrongSize(t *testing.T) {
-       if VerifySignature(knownHash+"+999999"+knownSigHint, knownToken, blobSignatureTTL, []byte(knownKey)) != nil {
-               t.Fatal("Verify cannot handle incorrect size hint")
-       }
-
-       if VerifySignature(knownHash+knownSigHint, knownToken, blobSignatureTTL, []byte(knownKey)) != nil {
-               t.Fatal("Verify cannot handle missing size hint")
-       }
-}
-
-func TestVerifySignatureBadSig(t *testing.T) {
-       badLocator := knownLocator + "+Aaaaaaaaaaaaaaaa@" + knownTimestamp
-       if VerifySignature(badLocator, knownToken, blobSignatureTTL, []byte(knownKey)) != ErrSignatureMissing {
-               t.Fail()
-       }
-}
-
-func TestVerifySignatureBadTimestamp(t *testing.T) {
-       badLocator := knownLocator + "+A" + knownSignature + "@OOOOOOOl"
-       if VerifySignature(badLocator, knownToken, blobSignatureTTL, []byte(knownKey)) != ErrSignatureMissing {
-               t.Fail()
-       }
-}
-
-func TestVerifySignatureBadSecret(t *testing.T) {
-       if VerifySignature(knownSignedLocator, knownToken, blobSignatureTTL, []byte("00000000000000000000")) != ErrSignatureInvalid {
-               t.Fail()
-       }
-}
-
-func TestVerifySignatureBadToken(t *testing.T) {
-       if VerifySignature(knownSignedLocator, "00000000", blobSignatureTTL, []byte(knownKey)) != ErrSignatureInvalid {
-               t.Fail()
-       }
-}
-
-func TestVerifySignatureExpired(t *testing.T) {
-       yesterday := time.Now().AddDate(0, 0, -1)
-       expiredLocator := SignLocator(knownHash, knownToken, yesterday, blobSignatureTTL, []byte(knownKey))
-       if VerifySignature(expiredLocator, knownToken, blobSignatureTTL, []byte(knownKey)) != ErrSignatureExpired {
-               t.Fail()
-       }
-}
index f1a8c292557b1ba89d9fa9b7b50ac519d718acb7..17ed6402ce0d79ab4dc1bddf30e6b0315df7fa16 100644 (file)
@@ -318,6 +318,57 @@ func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
        }
 }
 
+// Test TOUCH requests.
+func (s *HandlerSuite) TestTouchHandler(c *check.C) {
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+       vols := s.handler.volmgr.AllWritable()
+       vols[0].Put(context.Background(), TestHash, TestBlock)
+       vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
+       afterPut := time.Now()
+       t, err := vols[0].Mtime(TestHash)
+       c.Assert(err, check.IsNil)
+       c.Assert(t.Before(afterPut), check.Equals, true)
+
+       ExpectStatusCode(c,
+               "touch with no credentials",
+               http.StatusUnauthorized,
+               IssueRequest(s.handler, &RequestTester{
+                       method: "TOUCH",
+                       uri:    "/" + TestHash,
+               }))
+
+       ExpectStatusCode(c,
+               "touch with non-root credentials",
+               http.StatusUnauthorized,
+               IssueRequest(s.handler, &RequestTester{
+                       method:   "TOUCH",
+                       uri:      "/" + TestHash,
+                       apiToken: arvadostest.ActiveTokenV2,
+               }))
+
+       ExpectStatusCode(c,
+               "touch non-existent block",
+               http.StatusNotFound,
+               IssueRequest(s.handler, &RequestTester{
+                       method:   "TOUCH",
+                       uri:      "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+                       apiToken: s.cluster.SystemRootToken,
+               }))
+
+       beforeTouch := time.Now()
+       ExpectStatusCode(c,
+               "touch block",
+               http.StatusOK,
+               IssueRequest(s.handler, &RequestTester{
+                       method:   "TOUCH",
+                       uri:      "/" + TestHash,
+                       apiToken: s.cluster.SystemRootToken,
+               }))
+       t, err = vols[0].Mtime(TestHash)
+       c.Assert(err, check.IsNil)
+       c.Assert(t.After(beforeTouch), check.Equals, true)
+}
+
 // Test /index requests:
 //   - unauthenticated /index request
 //   - unauthenticated /index/prefix request
index 3d0f893d82fc20c8a01a833aa050f6203a1c70c7..eb0ea5ad2f133f3b8a569fa354255521f08c5965 100644 (file)
@@ -66,6 +66,8 @@ func MakeRESTRouter(ctx context.Context, cluster *arvados.Cluster, reg *promethe
        // List blocks stored here whose hash has the given prefix.
        // Privileged client only.
        rtr.HandleFunc(`/index/{prefix:[0-9a-f]{0,32}}`, rtr.handleIndex).Methods("GET", "HEAD")
+       // Update timestamp on existing block. Privileged client only.
+       rtr.HandleFunc(`/{hash:[0-9a-f]{32}}`, rtr.handleTOUCH).Methods("TOUCH")
 
        // Internals/debugging info (runtime.MemStats)
        rtr.HandleFunc(`/debug.json`, rtr.DebugHandler).Methods("GET", "HEAD")
@@ -191,6 +193,34 @@ func getBufferWithContext(ctx context.Context, bufs *bufferPool, bufSize int) ([
        }
 }
 
+func (rtr *router) handleTOUCH(resp http.ResponseWriter, req *http.Request) {
+       if !rtr.isSystemAuth(GetAPIToken(req)) {
+               http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
+               return
+       }
+       hash := mux.Vars(req)["hash"]
+       vols := rtr.volmgr.AllWritable()
+       if len(vols) == 0 {
+               http.Error(resp, "no volumes", http.StatusNotFound)
+               return
+       }
+       var err error
+       for _, mnt := range vols {
+               err = mnt.Touch(hash)
+               if err == nil {
+                       break
+               }
+       }
+       switch {
+       case err == nil:
+               return
+       case os.IsNotExist(err):
+               http.Error(resp, err.Error(), http.StatusNotFound)
+       default:
+               http.Error(resp, err.Error(), http.StatusInternalServerError)
+       }
+}
+
 func (rtr *router) handlePUT(resp http.ResponseWriter, req *http.Request) {
        ctx, cancel := contextForResponse(context.TODO(), resp)
        defer cancel()
index a928a71a2c0315a7666bb4f9ede138e3403d24cf..2de21edde6708faa3aff96ef32f2b61f191a9775 100644 (file)
@@ -178,13 +178,20 @@ func (v *MockVolume) Put(ctx context.Context, loc string, block []byte) error {
 }
 
 func (v *MockVolume) Touch(loc string) error {
+       return v.TouchWithDate(loc, time.Now())
+}
+
+func (v *MockVolume) TouchWithDate(loc string, t time.Time) error {
        v.gotCall("Touch")
        <-v.Gate
        if v.volume.ReadOnly {
                return MethodDisabledError
        }
+       if _, exists := v.Store[loc]; !exists {
+               return os.ErrNotExist
+       }
        if v.Touchable {
-               v.Timestamps[loc] = time.Now()
+               v.Timestamps[loc] = t
                return nil
        }
        return errors.New("Touch failed")