// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

// Dispatcher service for Crunch that submits containers to the slurm queue.
package dispatchslurm

import (
	"context"
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
	"log"
	"math"
	"net/http"
	"os"
	"regexp"
	"strings"
	"time"

21 "git.arvados.org/arvados.git/lib/cmd"
22 "git.arvados.org/arvados.git/lib/controller/dblock"
23 "git.arvados.org/arvados.git/lib/ctrlctx"
24 "git.arvados.org/arvados.git/lib/dispatchcloud"
25 "git.arvados.org/arvados.git/lib/service"
26 "git.arvados.org/arvados.git/sdk/go/arvados"
27 "git.arvados.org/arvados.git/sdk/go/arvadosclient"
28 "git.arvados.org/arvados.git/sdk/go/ctxlog"
29 "git.arvados.org/arvados.git/sdk/go/dispatch"
30 "github.com/coreos/go-systemd/daemon"
31 "github.com/prometheus/client_golang/prometheus"
32 "github.com/sirupsen/logrus"
var Command cmd.Handler = service.Command(arvados.ServiceNameDispatchSLURM, newHandler)

func newHandler(ctx context.Context, cluster *arvados.Cluster, _ string, _ *prometheus.Registry) service.Handler {
	logger := ctxlog.FromContext(ctx)
	disp := &Dispatcher{logger: logger, cluster: cluster}
	if err := disp.configure(); err != nil {
		return service.ErrorHandler(ctx, cluster, err)
	}
	disp.setup()
	go func() {
		disp.err = disp.run()
		close(disp.done)
	}()
	return disp
}

type logger interface {
	dispatch.Logger
	Fatalf(string, ...interface{})
}

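// initialNiceValue is the nice value each job is submitted with. A
// sketch of the rationale (not stated in this file): starting well
// above zero leaves headroom for disp.sqCheck.SetPriority to raise a
// job's effective priority later by renicing it toward zero, since
// unprivileged slurm users can increase a job's nice value but cannot
// make it negative.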
const initialNiceValue int64 = 10000

type Dispatcher struct {
	*dispatch.Dispatcher
	logger      logrus.FieldLogger
	cluster     *arvados.Cluster
	sqCheck     *SqueueChecker
	slurm       Slurm
	dbConnector ctrlctx.DBConnector

	Client arvados.Client

	done chan struct{}
	err  error
}

func (disp *Dispatcher) CheckHealth() error {
	return disp.err
}

func (disp *Dispatcher) Done() <-chan struct{} {
	return disp.done
}

func (disp *Dispatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	http.NotFound(w, r)
}

// configure() loads config files. Some tests skip this (see
// StubbedSuite).
func (disp *Dispatcher) configure() error {
	if disp.logger == nil {
		disp.logger = logrus.StandardLogger()
	}
	disp.logger = disp.logger.WithField("ClusterID", disp.cluster.ClusterID)
	disp.logger.Printf("crunch-dispatch-slurm %s started", cmd.Version.String())

	disp.Client.APIHost = disp.cluster.Services.Controller.ExternalURL.Host
	disp.Client.AuthToken = disp.cluster.SystemRootToken
	disp.Client.Insecure = disp.cluster.TLS.Insecure
	disp.dbConnector = ctrlctx.DBConnector{PostgreSQL: disp.cluster.PostgreSQL}

	if disp.Client.APIHost != "" || disp.Client.AuthToken != "" {
		// Copy real configs into env vars so [a]
		// MakeArvadosClient() uses them, and [b] they get
		// propagated to crunch-run via SLURM.
		os.Setenv("ARVADOS_API_HOST", disp.Client.APIHost)
		os.Setenv("ARVADOS_API_TOKEN", disp.Client.AuthToken)
		os.Setenv("ARVADOS_API_HOST_INSECURE", "")
		if disp.Client.Insecure {
			os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
		}
		for k, v := range disp.cluster.Containers.SLURM.SbatchEnvironmentVariables {
			os.Setenv(k, v)
		}
	} else {
		disp.logger.Warnf("Client credentials missing from config, so falling back on environment variables (deprecated).")
	}
	return nil
}

// setup() initializes private fields after configure().
func (disp *Dispatcher) setup() {
	disp.done = make(chan struct{})
	arv, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		disp.logger.Fatalf("Error making Arvados client: %v", err)
	}
	arv.Retries = 25

	disp.slurm = NewSlurmCLI()
	disp.sqCheck = &SqueueChecker{
		Logger:         disp.logger,
		Period:         time.Duration(disp.cluster.Containers.CloudVMs.PollInterval),
		PrioritySpread: disp.cluster.Containers.SLURM.PrioritySpread,
		Slurm:          disp.slurm,
	}
	disp.Dispatcher = &dispatch.Dispatcher{
		Arv:            arv,
		Logger:         disp.logger,
		BatchSize:      disp.cluster.API.MaxItemsPerResponse,
		RunContainer:   disp.runContainer,
		PollPeriod:     time.Duration(disp.cluster.Containers.CloudVMs.PollInterval),
		MinRetryPeriod: time.Duration(disp.cluster.Containers.MinRetryPeriod),
	}
}

func (disp *Dispatcher) run() error {
	dblock.Dispatch.Lock(context.Background(), disp.dbConnector.GetDB)
	defer dblock.Dispatch.Unlock()
	defer disp.sqCheck.Stop()

	if disp.cluster != nil && len(disp.cluster.InstanceTypes) > 0 {
		go SlurmNodeTypeFeatureKludge(disp.cluster)
	}

	if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
		log.Printf("Error notifying init daemon: %v", err)
	}
	go disp.checkSqueueForOrphans()
	return disp.Dispatcher.Run(context.Background())
}

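// containerUuidPattern matches Arvados container UUIDs: a
// five-character cluster ID, the fixed infix "dz642" identifying the
// containers table, and a 15-character suffix, e.g.
// "zzzzz-dz642-xxxxxxxxxxxxxxx".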
var containerUuidPattern = regexp.MustCompile(`^[a-z0-9]{5}-dz642-[a-z0-9]{15}$`)

// Check the next squeue report, and invoke TrackContainer for all the
// containers in the report. This gives us a chance to cancel slurm
// jobs started by a previous dispatch process that never released
// their slurm allocations even though their container states are
// Cancelled or Complete. See https://dev.arvados.org/issues/10979
func (disp *Dispatcher) checkSqueueForOrphans() {
	for _, uuid := range disp.sqCheck.All() {
		if !containerUuidPattern.MatchString(uuid) || !strings.HasPrefix(uuid, disp.cluster.ClusterID) {
			continue
		}
		err := disp.TrackContainer(uuid)
		if err != nil {
			log.Printf("checkSqueueForOrphans: TrackContainer(%s): %s", uuid, err)
		}
	}
}

func (disp *Dispatcher) slurmConstraintArgs(container arvados.Container) []string {
	// sbatch interprets --mem and --tmp as megabytes, so convert
	// from bytes, rounding up.
	mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+
		container.RuntimeConstraints.KeepCacheRAM+
		int64(disp.cluster.Containers.ReserveExtraRAM)) / float64(1048576)))

	disk := dispatchcloud.EstimateScratchSpace(&container)
	disk = int64(math.Ceil(float64(disk) / float64(1048576)))
	return []string{
		fmt.Sprintf("--mem=%d", mem),
		fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs),
		fmt.Sprintf("--tmp=%d", disk),
	}
}

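// A worked example, with assumed (not source-specified) numbers: a
// container requesting 8 GiB RAM, 1 GiB Keep cache, and 4 VCPUs, with
// ReserveExtraRAM left at zero, gives (8+1)*2^30/2^20 = 9216, i.e.
//
//	--mem=9216 --cpus-per-task=4 --tmp=<scratch estimate in MiB>
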
func (disp *Dispatcher) sbatchArgs(container arvados.Container) ([]string, error) {
	var args []string
	args = append(args, disp.cluster.Containers.SLURM.SbatchArgumentsList...)
	args = append(args, "--job-name="+container.UUID, fmt.Sprintf("--nice=%d", initialNiceValue), "--no-requeue")

	if disp.cluster == nil {
		// no instance types configured
		args = append(args, disp.slurmConstraintArgs(container)...)
	} else if types, err := dispatchcloud.ChooseInstanceType(disp.cluster, &container); err == dispatchcloud.ErrInstanceTypesNotConfigured {
		// ditto
		args = append(args, disp.slurmConstraintArgs(container)...)
	} else if err != nil {
		return nil, err
	} else {
		// use instancetype constraint instead of slurm
		// mem/cpu/tmp specs (note types[0] is the lowest-cost
		// suitable instance type)
		args = append(args, "--constraint=instancetype="+types[0].Name)
	}

	if len(container.SchedulingParameters.Partitions) > 0 {
		args = append(args, "--partition="+strings.Join(container.SchedulingParameters.Partitions, ","))
	}

	return args, nil
}

func (disp *Dispatcher) submit(container arvados.Container, crunchRunCommand []string) error {
	// append() here avoids modifying crunchRunCommand's
	// underlying array, which is shared with other goroutines.
	crArgs := append([]string(nil), crunchRunCommand...)
	crArgs = append(crArgs, "--runtime-engine="+disp.cluster.Containers.RuntimeEngine)
	crArgs = append(crArgs, container.UUID)

	h := hmac.New(sha256.New, []byte(disp.cluster.SystemRootToken))
	fmt.Fprint(h, container.UUID)
	authsecret := fmt.Sprintf("%x", h.Sum(nil))

	crScript := strings.NewReader(execScript(crArgs, map[string]string{"GatewayAuthSecret": authsecret}))

	sbArgs, err := disp.sbatchArgs(container)
	if err != nil {
		return err
	}
	log.Printf("running sbatch %+q", sbArgs)
	return disp.slurm.Batch(crScript, sbArgs)
}

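// A note on the design above: deriving GatewayAuthSecret as
// HMAC-SHA256 of the container UUID keyed by SystemRootToken means any
// service that knows the token can recompute the per-container secret
// instead of storing it, and passing it to crunch-run inside the
// generated batch script (rather than on the sbatch command line)
// keeps it out of ps/squeue output.
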
// Submit a container to the slurm queue (or resume monitoring if it's
// already in the queue). Cancel the slurm job if the container's
// priority changes to zero or its state indicates it's no longer
// running.
func (disp *Dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	if ctr.State == dispatch.Locked && !disp.sqCheck.HasUUID(ctr.UUID) {
		log.Printf("Submitting container %s to slurm", ctr.UUID)
		cmd := []string{disp.cluster.Containers.CrunchRunCommand}
		cmd = append(cmd, disp.cluster.Containers.CrunchRunArgumentsList...)
		err := disp.submit(ctr, cmd)
		if err != nil {
			return err
		}
	}

	log.Printf("Start monitoring container %v in state %q", ctr.UUID, ctr.State)
	defer log.Printf("Done monitoring container %s", ctr.UUID)

	// If the container disappears from the slurm queue, there is
	// no point in waiting for further dispatch updates: just
	// clean up and return. (HasUUID blocks until the next squeue
	// poll, so this loop does not spin.)
	go func(uuid string) {
		for ctx.Err() == nil && disp.sqCheck.HasUUID(uuid) {
		}
		cancel()
	}(ctr.UUID)

	for {
		select {
		case <-ctx.Done():
			// Disappeared from squeue
			if err := disp.Arv.Get("containers", ctr.UUID, nil, &ctr); err != nil {
				log.Printf("error getting final container state for %s: %s", ctr.UUID, err)
			}
			switch ctr.State {
			case dispatch.Running:
				disp.UpdateState(ctr.UUID, dispatch.Cancelled)
			case dispatch.Locked:
				disp.Unlock(ctr.UUID)
			}
			return nil
		case updated, ok := <-status:
			if !ok {
				log.Printf("container %s is done: cancel slurm job", ctr.UUID)
				disp.scancel(ctr)
			} else if updated.Priority == 0 {
				log.Printf("container %s has state %q, priority %d: cancel slurm job", ctr.UUID, updated.State, updated.Priority)
				disp.scancel(ctr)
			} else {
				p := int64(updated.Priority)
				if p <= 1000 {
					// The API is providing a user-assigned
					// priority. If containers have equal
					// priority, run the older one first.
					p = p<<50 - (updated.CreatedAt.UnixNano() >> 14)
				}
				disp.sqCheck.SetPriority(ctr.UUID, p)
			}
		}
	}
}

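// Why the shift arithmetic above preserves priority order: with
// p <= 1000 < 2^10, p<<50 stays below 2^60 (well inside int64), and
// adjacent priorities differ by 2^50. CreatedAt.UnixNano()>>14 keeps
// ~16µs resolution and is currently on the order of 2^47, so the
// subtracted tiebreaker can never outweigh a one-step priority
// difference; it only orders equal-priority containers, oldest first.
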
func (disp *Dispatcher) scancel(ctr arvados.Container) {
	err := disp.slurm.Cancel(ctr.UUID)
	if err != nil {
		log.Printf("scancel: %s", err)
		// Sleep to rate-limit retries: runContainer will call
		// scancel again while the job remains in the queue.
		time.Sleep(time.Second)
	} else if disp.sqCheck.HasUUID(ctr.UUID) {
		log.Printf("container %s is still in squeue after scancel", ctr.UUID)
		time.Sleep(time.Second)
	}
}