1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
13 "git.curoverse.com/arvados.git/sdk/go/arvados"
14 "github.com/ghodss/yaml"
// deprRequestLimits mirrors the deprecated per-cluster "RequestLimits"
// config section so old-style YAML can still be unmarshalled. Pointer
// fields distinguish "not set" (nil) from an explicit zero value; see
// the nil checks in applyDeprecatedConfig.
17 type deprRequestLimits struct {
18 MaxItemsPerResponse *int
19 MultiClusterRequestConcurrency *int
// deprCluster holds the deprecated per-cluster config keys that
// applyDeprecatedConfig migrates into the current arvados.Config.
22 type deprCluster struct {
23 RequestLimits deprRequestLimits
24 NodeProfiles map[string]nodeProfile
// deprecatedConfig is the top-level shape of an old-style cluster
// config file: a map of cluster ID to deprecated cluster settings.
27 type deprecatedConfig struct {
28 Clusters map[string]deprCluster
// nodeProfile is the deprecated per-host listing of Arvados services.
// Each field maps one service to its configuration, keyed in YAML/JSON
// by the legacy service name given in the struct tag.
31 type nodeProfile struct {
32 Controller systemServiceInstance `json:"arvados-controller"`
33 Health systemServiceInstance `json:"arvados-health"`
34 Keepbalance systemServiceInstance `json:"keep-balance"`
35 Keepproxy systemServiceInstance `json:"keepproxy"`
36 Keepstore systemServiceInstance `json:"keepstore"`
37 Keepweb systemServiceInstance `json:"keep-web"`
38 Nodemanager systemServiceInstance `json:"arvados-node-manager"`
39 DispatchCloud systemServiceInstance `json:"arvados-dispatch-cloud"`
40 RailsAPI systemServiceInstance `json:"arvados-api-server"`
41 Websocket systemServiceInstance `json:"arvados-ws"`
42 Workbench1 systemServiceInstance `json:"arvados-workbench"`
// systemServiceInstance describes one service entry within a
// nodeProfile. NOTE(review): its fields are elided from this excerpt;
// applyDeprecatedNodeProfile presumably derives a scheme and listen
// host from them — confirm against the full file.
45 type systemServiceInstance struct {
// applyDeprecatedConfig re-parses ldr.configdata using the deprecated
// schema and migrates any legacy keys it finds (NodeProfiles,
// RequestLimits) into the corresponding current-style fields of cfg.
// It returns an error if the legacy data names a cluster ID that does
// not exist in cfg.
51 func (ldr *Loader) applyDeprecatedConfig(cfg *arvados.Config) error {
52 var dc deprecatedConfig
53 err := yaml.Unmarshal(ldr.configdata, &dc)
// Hostname is used below to decide which NodeProfile entry applies to
// this node.
57 hostname, err := os.Hostname()
61 for id, dcluster := range dc.Clusters {
62 cluster, ok := cfg.Clusters[id]
// A legacy cluster ID must already be present in the current config.
64 return fmt.Errorf("can't load legacy config %q that is not present in current config", id)
66 for name, np := range dcluster.NodeProfiles {
// A profile applies to this node when its key is "*", matches the
// ARVADOS_NODE_PROFILE environment variable, or matches this host's
// hostname. Otherwise we warn that the key is being treated as some
// other node's hostname.
67 if name == "*" || name == os.Getenv("ARVADOS_NODE_PROFILE") || name == hostname {
69 } else if ldr.Logger != nil {
70 ldr.Logger.Warnf("overriding Clusters.%s.Services using Clusters.%s.NodeProfiles.%s (guessing %q is a hostname)", id, id, name, name)
// Copy each legacy per-service entry into the current Services config.
72 applyDeprecatedNodeProfile(name, np.RailsAPI, &cluster.Services.RailsAPI)
73 applyDeprecatedNodeProfile(name, np.Controller, &cluster.Services.Controller)
74 applyDeprecatedNodeProfile(name, np.DispatchCloud, &cluster.Services.DispatchCloud)
// Legacy RequestLimits override the current API limits only when they
// are explicitly set (non-nil) and differ from the current value.
76 if dst, n := &cluster.API.MaxItemsPerResponse, dcluster.RequestLimits.MaxItemsPerResponse; n != nil && *n != *dst {
79 if dst, n := &cluster.API.MaxRequestAmplification, dcluster.RequestLimits.MultiClusterRequestConcurrency; n != nil && *n != *dst {
// cluster is a copy of the map entry; store the modified value back.
82 cfg.Clusters[id] = cluster
// applyDeprecatedNodeProfile adds an InternalURLs entry to svc based
// on the legacy service instance ssi, substituting hostname when the
// configured listen address has no host part (":port").
87 func applyDeprecatedNodeProfile(hostname string, ssi systemServiceInstance, svc *arvados.Service) {
// Writing to a nil map would panic, so initialize it lazily.
92 if svc.InternalURLs == nil {
93 svc.InternalURLs = map[arvados.URL]arvados.ServiceInstance{}
// A bare ":port" listen address means "this host".
99 if strings.HasPrefix(host, ":") {
100 host = hostname + host
// NOTE(review): scheme and host are computed on lines elided from this
// excerpt — presumably from ssi's listen/TLS settings; confirm against
// the full file.
102 svc.InternalURLs[arvados.URL{Scheme: scheme, Host: host}] = arvados.ServiceInstance{}
// defaultKeepstoreConfigPath is where the legacy keepstore config file
// is expected when the operator has not specified another path.
105 const defaultKeepstoreConfigPath = "/etc/arvados/keepstore/keepstore.yml"
// oldKeepstoreConfig mirrors the legacy keepstore config file format.
// NOTE(review): its fields are elided from this excerpt; at least a
// *bool Debug field is read by loadOldKeepstoreConfig.
107 type oldKeepstoreConfig struct {
// loadOldConfigHelper reads the legacy config file at path and
// unmarshals its YAML into target, warning the operator that the file
// should be migrated into the cluster config. component names the
// service, for use in log and error messages.
111 func (ldr *Loader) loadOldConfigHelper(component, path string, target interface{}) error {
115 buf, err := ioutil.ReadFile(path)
// Nag the operator: legacy files should be folded into ldr.Path.
120 ldr.Logger.Warnf("you should remove the legacy %v config file (%s) after migrating all config keys to the cluster configuration file (%s)", component, path, ldr.Path)
122 err = yaml.Unmarshal(buf, target)
// Prefix the error with the filename so the operator knows which file
// failed to parse.
124 return fmt.Errorf("%s: %s", path, err)
129 // update config using values from an old-style keepstore config file.
130 func (ldr *Loader) loadOldKeepstoreConfig(cfg *arvados.Config) error {
131 var oc oldKeepstoreConfig
132 err := ldr.loadOldConfigHelper("keepstore", ldr.KeepstorePath, &oc)
// A missing file is tolerated only when the operator did not
// explicitly point us at a nonstandard path.
133 if os.IsNotExist(err) && ldr.KeepstorePath == defaultKeepstoreConfigPath {
135 } else if err != nil {
139 cluster, err := cfg.GetCluster("")
// Debug==nil means the legacy file did not set it, so leave LogLevel
// alone; otherwise map true=>"debug" and false=>"info", skipping the
// assignment when the value is already in effect.
144 if v := oc.Debug; v == nil {
145 } else if *v && cluster.SystemLogs.LogLevel != "debug" {
146 cluster.SystemLogs.LogLevel = "debug"
147 } else if !*v && cluster.SystemLogs.LogLevel != "info" {
148 cluster.SystemLogs.LogLevel = "info"
// cluster is a copy; write the updated value back into the config map.
151 cfg.Clusters[cluster.ClusterID] = *cluster
// oldCrunchDispatchSlurmConfig mirrors the legacy crunch-dispatch-slurm
// config file format. Pointer fields distinguish "not set" (nil) from
// an explicit zero value; loadOldCrunchDispatchSlurmConfig copies each
// field into the cluster config only when it is non-nil.
155 type oldCrunchDispatchSlurmConfig struct {
156 Client *arvados.Client
158 SbatchArguments *[]string
159 PollPeriod *arvados.Duration
160 PrioritySpread *int64
162 // crunch-run command to invoke. The container UUID will be
163 // appended. If nil, []string{"crunch-run"} will be used.
165 // Example: []string{"crunch-run", "--cgroup-parent-subsystem=memory"}
166 CrunchRunCommand *[]string
168 // Extra RAM to reserve (in Bytes) for SLURM job, in addition
169 // to the amount specified in the container's RuntimeConstraints
170 ReserveExtraRAM *int64
172 // Minimum time between two attempts to run the same container
173 MinRetryPeriod *arvados.Duration
175 // Batch size for container queries
// defaultCrunchDispatchSlurmConfigPath is where the legacy
// crunch-dispatch-slurm config file is expected when the operator has
// not specified another path.
179 const defaultCrunchDispatchSlurmConfigPath = "/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml"
181 // update config using values from a crunch-dispatch-slurm config file.
182 func (ldr *Loader) loadOldCrunchDispatchSlurmConfig(cfg *arvados.Config) error {
183 var oc oldCrunchDispatchSlurmConfig
184 err := ldr.loadOldConfigHelper("crunch-dispatch-slurm", ldr.CrunchDispatchSlurmPath, &oc)
// A missing file is tolerated only when the operator did not
// explicitly point us at a nonstandard path.
185 if os.IsNotExist(err) && ldr.CrunchDispatchSlurmPath == defaultCrunchDispatchSlurmConfigPath {
187 } else if err != nil {
191 cluster, err := cfg.GetCluster("")
// Migrate legacy API client settings (host, scheme, token, TLS) into
// the cluster config.
196 if oc.Client != nil {
198 u.Host = oc.Client.APIHost
// An empty legacy Scheme leaves the current scheme in place.
199 if oc.Client.Scheme != "" {
200 u.Scheme = oc.Client.Scheme
204 cluster.Services.Controller.ExternalURL = u
205 cluster.SystemRootToken = oc.Client.AuthToken
206 cluster.TLS.Insecure = oc.Client.Insecure
// Each legacy knob below is copied only when explicitly set (non-nil).
209 if oc.SbatchArguments != nil {
210 cluster.Containers.SLURM.SbatchArgumentsList = *oc.SbatchArguments
212 if oc.PollPeriod != nil {
213 cluster.Containers.CloudVMs.PollInterval = *oc.PollPeriod
215 if oc.PrioritySpread != nil {
216 cluster.Containers.SLURM.PrioritySpread = *oc.PrioritySpread
// CrunchRunCommand is split: first element is the command itself, any
// remaining elements become its argument list.
218 if oc.CrunchRunCommand != nil {
219 if len(*oc.CrunchRunCommand) >= 1 {
220 cluster.Containers.CrunchRunCommand = (*oc.CrunchRunCommand)[0]
222 if len(*oc.CrunchRunCommand) >= 2 {
223 cluster.Containers.CrunchRunArgumentsList = (*oc.CrunchRunCommand)[1:]
226 if oc.ReserveExtraRAM != nil {
227 cluster.Containers.ReserveExtraRAM = arvados.ByteSize(*oc.ReserveExtraRAM)
229 if oc.MinRetryPeriod != nil {
230 cluster.Containers.MinRetryPeriod = *oc.MinRetryPeriod
232 if oc.BatchSize != nil {
233 cluster.API.MaxItemsPerResponse = int(*oc.BatchSize)
// cluster is a copy; write it back into the config map.
236 cfg.Clusters[cluster.ClusterID] = *cluster