1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
18 "git.arvados.org/arvados.git/lib/config"
19 "git.arvados.org/arvados.git/lib/service"
20 "git.arvados.org/arvados.git/sdk/go/arvados"
21 "git.arvados.org/arvados.git/sdk/go/arvadosclient"
22 "git.arvados.org/arvados.git/sdk/go/ctxlog"
23 "git.arvados.org/arvados.git/sdk/go/keepclient"
24 "github.com/prometheus/client_golang/prometheus"
25 "github.com/sirupsen/logrus"
// Command is the keepstore service command, wired into the generic
// service framework with newHandlerOrErrorHandler as its handler
// factory.
Command = service.Command(arvados.ServiceNameKeepstore, newHandlerOrErrorHandler)
// runCommand is the CLI entry point. It first rewrites any legacy
// keepstore command line flags into their service.Command
// equivalents, then delegates to Command.RunCommand.
func runCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
	args, ok := convertKeepstoreFlagsToServiceFlags(args, ctxlog.FromContext(context.Background()))
	return Command.RunCommand(prog, args, stdin, stdout, stderr)
// Parse keepstore command line flags, and return equivalent
// service.Command flags. The second return value ("ok") is true if
// all provided flags were successfully converted.
func convertKeepstoreFlagsToServiceFlags(args []string, lgr logrus.FieldLogger) ([]string, bool) {
	// Declare every legacy flag so parsing succeeds. The usage string
	// is repurposed to hold the cluster-config entry that supersedes
	// the flag; "-" marks a flag with no config equivalent at all
	// (see the f.Usage checks in the Visit callback below).
	flags := flag.NewFlagSet("", flag.ContinueOnError)
	flags.String("listen", "", "Services.Keepstore.InternalURLs")
	flags.Int("max-buffers", 0, "API.MaxKeepBlobBuffers")
	flags.Int("max-requests", 0, "API.MaxConcurrentRequests")
	flags.Bool("never-delete", false, "Collections.BlobTrash")
	flags.Bool("enforce-permissions", false, "Collections.BlobSigning")
	flags.String("permission-key-file", "", "Collections.BlobSigningKey")
	flags.String("blob-signing-key-file", "", "Collections.BlobSigningKey")
	flags.String("data-manager-token-file", "", "SystemRootToken")
	flags.Int("permission-ttl", 0, "Collections.BlobSigningTTL")
	flags.Int("blob-signature-ttl", 0, "Collections.BlobSigningTTL")
	flags.String("trash-lifetime", "", "Collections.BlobTrashLifetime")
	flags.Bool("serialize", false, "Volumes.*.DriverParameters.Serialize")
	flags.Bool("readonly", false, "Volumes.*.ReadOnly")
	flags.String("pid", "", "-")
	flags.String("trash-check-interval", "", "Collections.BlobTrashCheckInterval")

	// Legacy Azure volume flags.
	flags.String("azure-storage-container-volume", "", "Volumes.*.Driver")
	flags.String("azure-storage-account-name", "", "Volumes.*.DriverParameters.StorageAccountName")
	flags.String("azure-storage-account-key-file", "", "Volumes.*.DriverParameters.StorageAccountKey")
	flags.String("azure-storage-replication", "", "Volumes.*.Replication")
	flags.String("azure-max-get-bytes", "", "Volumes.*.DriverParameters.MaxDataReadSize")

	// Legacy S3 volume flags.
	flags.String("s3-bucket-volume", "", "Volumes.*.DriverParameters.Bucket")
	flags.String("s3-region", "", "Volumes.*.DriverParameters.Region")
	flags.String("s3-endpoint", "", "Volumes.*.DriverParameters.Endpoint")
	flags.String("s3-access-key-file", "", "Volumes.*.DriverParameters.AccessKeyID")
	flags.String("s3-secret-key-file", "", "Volumes.*.DriverParameters.SecretAccessKey")
	flags.String("s3-race-window", "", "Volumes.*.DriverParameters.RaceWindow")
	flags.String("s3-replication", "", "Volumes.*.Replication")
	flags.String("s3-unsafe-delete", "", "Volumes.*.DriverParameters.UnsafeDelete")

	flags.String("volume", "", "Volumes")

	// Flags that are still supported and get passed through verbatim
	// (see the f.Name checks in the Visit callback below).
	flags.Bool("version", false, "")
	flags.String("config", "", "")
	flags.String("legacy-keepstore-config", "", "")

	err := flags.Parse(args)
	if err == flag.ErrHelp {
		// -help: let service.Command print its own usage.
		return []string{"-help"}, true
	} else if err != nil {
	// Walk only the flags that were actually provided: pass through
	// the still-supported ones, and log an error for each retired
	// flag, pointing at its replacement config entry when one exists.
	flags.Visit(func(f *flag.Flag) {
		if f.Name == "config" || f.Name == "legacy-keepstore-config" || f.Name == "version" {
			args = append(args, "-"+f.Name, f.Value.String())
		} else if f.Usage == "-" {
			// No config equivalent exists for this flag.
			lgr.Errorf("command line flag -%s is no longer supported", f.Name)
			lgr.Errorf("command line flag -%s is no longer supported -- use Clusters.*.%s in cluster config file instead", f.Name, f.Usage)
	// Hand the surviving args to the config loader, which converts
	// any -legacy-keepstore-config argument into the modern form.
	flags = flag.NewFlagSet("", flag.ExitOnError)
	loader := config.NewLoader(nil, lgr)
	loader.SetupFlags(flags)
	return loader.MungeLegacyConfigArgs(lgr, args, "-legacy-keepstore-config"), true
// handler is the keepstore service handler; its fields are populated
// by setup().
type handler struct {
	Cluster *arvados.Cluster
	Logger  logrus.FieldLogger

	volmgr     *RRVolumeManager       // round-robin volume manager created in setup
	keepClient *keepclient.KeepClient // client used by pull workers
// CheckHealth implements service.Handler.
func (h *handler) CheckHealth() error {
// Done implements service.Handler.
func (h *handler) Done() <-chan struct{} {
// newHandlerOrErrorHandler constructs and initializes a keepstore
// handler; if initialization fails, it returns a
// service.ErrorHandler reporting the error instead, so the service
// still starts and can surface the problem.
func newHandlerOrErrorHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry) service.Handler {
	serviceURL, ok := service.URLFromContext(ctx)
	// The service framework is expected to have stored our URL in
	// ctx; its absence is a programming error, not a config error.
	return service.ErrorHandler(ctx, cluster, errors.New("BUG: no URL from service.URLFromContext"))
	err := h.setup(ctx, cluster, token, reg, serviceURL)
	return service.ErrorHandler(ctx, cluster, err)
// setup initializes the handler from the cluster config: validates
// config values, allocates the block buffer pool, builds the volume
// manager, starts pull/trash workers, installs the REST routes, and
// creates the keepclient used by pull workers.
func (h *handler) setup(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry, serviceURL arvados.URL) error {
	h.Logger = ctxlog.FromContext(ctx)
	// Validate config before allocating anything.
	if h.Cluster.API.MaxKeepBlobBuffers <= 0 {
		return fmt.Errorf("API.MaxKeepBlobBuffers must be greater than zero")
	// Allocate the shared pool of block-sized data buffers.
	bufs = newBufferPool(h.Logger, h.Cluster.API.MaxKeepBlobBuffers, BlockSize)

	// More buffers than concurrent requests is wasteful: the extra
	// buffers can never all be in use at once.
	if h.Cluster.API.MaxConcurrentRequests > 0 && h.Cluster.API.MaxConcurrentRequests < h.Cluster.API.MaxKeepBlobBuffers {
		h.Logger.Warnf("Possible configuration mistake: not useful to set API.MaxKeepBlobBuffers (%d) higher than API.MaxConcurrentRequests (%d)", h.Cluster.API.MaxKeepBlobBuffers, h.Cluster.API.MaxConcurrentRequests)

	if h.Cluster.Collections.BlobSigningKey != "" {
	} else if h.Cluster.Collections.BlobSigning {
		// Signing enabled but no key configured: hard error.
		return errors.New("cannot enable Collections.BlobSigning with no Collections.BlobSigningKey")
		// No key and signing disabled: legal, but worth a loud warning.
		h.Logger.Warn("Running without a blob signing key. Block locators returned by this server will not be signed, and will be rejected by a server that enforces permissions. To fix this, configure Collections.BlobSigning and Collections.BlobSigningKey.")

	if len(h.Cluster.Volumes) == 0 {
		return errors.New("no volumes configured")

	h.Logger.Printf("keepstore %s starting, pid %d", version, os.Getpid())

	// Start a round-robin VolumeManager with the configured volumes.
	vm, err := makeRRVolumeManager(h.Logger, h.Cluster, serviceURL, newVolumeMetricsVecs(reg))
	// Volumes may exist in config without being assigned to this
	// server's URL; at least one readable volume is required here.
	if len(vm.readables) == 0 {
		return fmt.Errorf("no volumes configured for %s", serviceURL)

	// Initialize the pullq and workers
	h.pullq = NewWorkQueue()
	// "i < 1 || ..." guarantees at least one worker even when the
	// concurrency setting is zero/unset.
	for i := 0; i < 1 || i < h.Cluster.Collections.BlobReplicateConcurrency; i++ {
		go h.runPullWorker(h.pullq)

	// Initialize the trashq and workers
	h.trashq = NewWorkQueue()
	for i := 0; i < 1 || i < h.Cluster.Collections.BlobTrashConcurrency; i++ {
		go RunTrashWorker(h.volmgr, h.Logger, h.Cluster, h.trashq)

	// Set up routes and metrics
	h.Handler = MakeRESTRouter(ctx, cluster, reg, vm, h.pullq, h.trashq)

	// Initialize keepclient for pull workers
	c, err := arvados.NewClientFromConfig(cluster)
	ac, err := arvadosclient.New(c)
	h.keepClient = &keepclient.KeepClient{
	// NOTE(review): the pull-worker client gets a random token --
	// presumably keepstore-to-keepstore block fetches don't need a
	// valid API token; confirm before relying on this.
	h.keepClient.Arvados.ApiToken = fmt.Sprintf("%x", rand.Int63())

	// Start the background trash emptier only if an interval is
	// configured (a zero interval disables it).
	if d := h.Cluster.Collections.BlobTrashCheckInterval.Duration(); d > 0 {
		go emptyTrash(h.volmgr.writables, d)