lib/dispatchcloud/scheduler/run_queue.go
// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package scheduler

import (
        "fmt"
        "sort"
        "time"

        "git.arvados.org/arvados.git/lib/dispatchcloud/container"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "github.com/sirupsen/logrus"
)

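// quietAfter503 is how long the scheduler waits after the most
// recent HTTP 503 response before allowing maxConcurrency to rise
// again (see runQueue below).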
var quietAfter503 = time.Minute

type QueueEnt struct {
        container.QueueEnt

        // Human-readable scheduling status as of the last scheduling
        // iteration.
        SchedulingStatus string `json:"scheduling_status"`
}

const (
        schedStatusPreparingRuntimeEnvironment = "preparing runtime environment"
        schedStatusPriorityZero                = "not scheduling: priority 0" // ", state X" appended at runtime
        schedStatusContainerLimitReached       = "not starting: supervisor container limit has been reached"
        schedStatusWaitingForPreviousAttempt   = "waiting for previous attempt to exit"
        schedStatusWaitingNewInstance          = "waiting for new instance to be ready"
        schedStatusWaitingInstanceType         = "waiting for suitable instance type to become available" // ": queue position X" appended at runtime
        schedStatusWaitingCloudResources       = "waiting for cloud resources"
        schedStatusWaitingClusterCapacity      = "waiting while cluster is running at capacity" // ": queue position X" appended at runtime
)

// Queue returns the sorted queue from the last scheduling iteration.
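//
// A hypothetical caller sketch (not part of this file) that renders
// the snapshot, e.g., for a status report:
//
//	for _, ent := range sch.Queue() {
//		fmt.Printf("%s\t%s\n", ent.Container.UUID, ent.SchedulingStatus)
//	}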
func (sch *Scheduler) Queue() []QueueEnt {
        ents, _ := sch.lastQueue.Load().([]QueueEnt)
        return ents
}

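// runQueue performs one scheduling iteration: it snapshots and sorts
// the container queue, adjusts maxConcurrency based on recent HTTP
// 503 responses and cloud quota/capacity signals, then walks the
// queue, locking and starting containers (and creating instances as
// needed) until it runs out of candidates, quota, or concurrency
// headroom.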
func (sch *Scheduler) runQueue() {
        running := sch.pool.Running()
        unalloc := sch.pool.Unallocated()

        totalInstances := 0
        for _, n := range sch.pool.CountWorkers() {
                totalInstances += n
        }

        unsorted, _ := sch.queue.Entries()
        sorted := make([]QueueEnt, 0, len(unsorted))
        for _, ent := range unsorted {
                sorted = append(sorted, QueueEnt{QueueEnt: ent})
        }
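        // Sort order: running containers first, then locked ones,
        // then descending priority, then first-seen time; the
        // comparator comments below explain why.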
        sort.Slice(sorted, func(i, j int) bool {
                _, irunning := running[sorted[i].Container.UUID]
                _, jrunning := running[sorted[j].Container.UUID]
                if irunning != jrunning {
                        // Put already-scheduled containers first, so
                        // the "tryrun" loop (see below) counts
                        // existing supervisor containers before we
                        // decide whether we have room for new ones.
                        return irunning
                }
                ilocked := sorted[i].Container.State == arvados.ContainerStateLocked
                jlocked := sorted[j].Container.State == arvados.ContainerStateLocked
                if ilocked != jlocked {
                        // Give precedence to containers that we have
                        // already locked, even if higher-priority
                        // containers have since arrived in the
                        // queue. This avoids undesirable queue churn
                        // effects including extra lock/unlock cycles
                        // and bringing up new instances and quickly
                        // shutting them down to make room for
                        // different instance sizes.
                        return ilocked
                } else if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
                        return pi > pj
                } else {
                        // When containers have identical priority,
                        // start them in the order we first noticed
                        // them. This avoids extra lock/unlock cycles
                        // when we unlock the containers that don't
                        // fit in the available pool.
                        return sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)
                }
        })

        if t := sch.client.Last503(); t.After(sch.last503time) {
                // API has sent an HTTP 503 response since last time
                // we checked. Use current #containers - 1 as
                // maxConcurrency, i.e., try to stay just below the
                // level where we see 503s.
                sch.last503time = t
                if newlimit := len(running) - 1; newlimit < 1 {
                        sch.maxConcurrency = 1
                } else {
                        sch.maxConcurrency = newlimit
                }
        } else if sch.maxConcurrency > 0 && time.Since(sch.last503time) > quietAfter503 {
                // If we haven't seen any 503 errors lately, raise
                // limit to ~10% beyond the current workload.
                //
                // As we use the added 10% to schedule more
                // containers, len(running) will increase and we'll
                // push the limit up further. Soon enough,
                // maxConcurrency will get high enough to schedule the
                // entire queue, hit pool quota, or get 503s again.
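                //
                // For example (integer arithmetic): with 40 running
                // containers, max = 40*11/10+1 = 45; with 0 running,
                // max = 1, so we can always try at least one
                // container once the quiet period has passed.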
                max := len(running)*11/10 + 1
                if sch.maxConcurrency < max {
                        sch.maxConcurrency = max
                }
        }
        if sch.last503time.IsZero() {
                sch.mLast503Time.Set(0)
        } else {
                sch.mLast503Time.Set(float64(sch.last503time.Unix()))
        }
        if sch.maxInstances > 0 && sch.maxConcurrency > sch.maxInstances {
                sch.maxConcurrency = sch.maxInstances
        }
        if sch.instancesWithinQuota > 0 && sch.instancesWithinQuota < totalInstances {
                // Evidently it is possible to run this many
                // instances, so raise our estimate.
                sch.instancesWithinQuota = totalInstances
        }
        if sch.pool.AtQuota() {
                // Consider current workload to be the maximum
                // allowed, for the sake of reporting metrics and
                // calculating max supervisors.
                //
                // Now that sch.maxConcurrency is set, we will only
                // raise it past len(running) by 10%.  This helps
                // avoid running an inappropriate number of
                // supervisors when we reach the cloud-imposed quota
                // (which may be based on # CPUs etc) long before the
                // configured MaxInstances.
                if sch.maxConcurrency == 0 || sch.maxConcurrency > totalInstances {
                        if totalInstances == 0 {
                                sch.maxConcurrency = 1
                        } else {
                                sch.maxConcurrency = totalInstances
                        }
                }
                sch.instancesWithinQuota = totalInstances
        } else if sch.instancesWithinQuota > 0 && sch.maxConcurrency > sch.instancesWithinQuota+1 {
                // Once we've hit a quota error and started tracking
                // instancesWithinQuota (i.e., it's not zero), we
                // avoid exceeding that known-working level by more
                // than 1.
                //
                // If we don't do this, we risk entering a pattern of
                // repeatedly locking several containers, hitting
                // quota again, and unlocking them again each time the
                // driver stops reporting AtQuota, which tends to use
                // up the max lock/unlock cycles on the next few
                // containers in the queue, and cause them to fail.
                sch.maxConcurrency = sch.instancesWithinQuota + 1
        }
        sch.mMaxContainerConcurrency.Set(float64(sch.maxConcurrency))

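        // maxSupervisors caps supervisor containers as a fraction of
        // total concurrency. For example, maxConcurrency=10 with
        // supervisorFraction=0.25 allows int(10*0.25) = 2
        // supervisors; any nonzero fraction allows at least one.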
        maxSupervisors := int(float64(sch.maxConcurrency) * sch.supervisorFraction)
        if maxSupervisors < 1 && sch.supervisorFraction > 0 && sch.maxConcurrency > 0 {
                maxSupervisors = 1
        }

        sch.logger.WithFields(logrus.Fields{
                "Containers":     len(sorted),
                "Processes":      len(running),
                "maxConcurrency": sch.maxConcurrency,
        }).Debug("runQueue")

        dontstart := map[arvados.InstanceType]bool{}
        var atcapacity = map[string]bool{} // ProviderTypes reported as AtCapacity during this runQueue() invocation
        var overquota []QueueEnt           // entries that are unmappable because of worker pool quota
        var overmaxsuper []QueueEnt        // unmappable because max supervisors (these are not included in overquota)
        var containerAllocatedWorkerBootingCount int

        // trying is #containers running + #containers we're trying to
        // start. We stop trying to start more containers if this
        // reaches the dynamic maxConcurrency limit.
        trying := len(running)

        qpos := 0
        supervisors := 0

tryrun:
        for i, ent := range sorted {
                ctr, types := ent.Container, ent.InstanceTypes
                logger := sch.logger.WithFields(logrus.Fields{
                        "ContainerUUID": ctr.UUID,
                })
                if ctr.SchedulingParameters.Supervisor {
                        supervisors += 1
                }
                if _, running := running[ctr.UUID]; running {
                        if ctr.State == arvados.ContainerStateQueued || ctr.State == arvados.ContainerStateLocked {
                                sorted[i].SchedulingStatus = schedStatusPreparingRuntimeEnvironment
                        }
                        continue
                }
                if ctr.Priority < 1 {
                        sorted[i].SchedulingStatus = schedStatusPriorityZero + ", state " + string(ctr.State)
                        continue
                }
                if ctr.SchedulingParameters.Supervisor && maxSupervisors > 0 && supervisors > maxSupervisors {
                        overmaxsuper = append(overmaxsuper, sorted[i])
                        sorted[i].SchedulingStatus = schedStatusContainerLimitReached
                        continue
                }
                // If we have unalloc instances of any of the eligible
                // instance types, unallocOK is true and unallocType
                // is the lowest-cost such type.
                var unallocOK bool
                var unallocType arvados.InstanceType
                for _, it := range types {
                        if unalloc[it] > 0 {
                                unallocOK = true
                                unallocType = it
                                break
                        }
                }
                // If at least one of the eligible instance types is
                // not at capacity, availableOK is true and
                // availableType is the lowest-cost such type.
                var availableOK bool
                var availableType arvados.InstanceType
                for _, it := range types {
                        if atcapacity[it.ProviderType] {
                                continue
                        } else if sch.pool.AtCapacity(it) {
                                atcapacity[it.ProviderType] = true
                                continue
                        } else {
                                availableOK = true
                                availableType = it
                                break
                        }
                }
                switch ctr.State {
                case arvados.ContainerStateQueued:
                        if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
                                logger.Tracef("not locking: already at maxConcurrency %d", sch.maxConcurrency)
                                continue
                        }
                        trying++
                        if !unallocOK && sch.pool.AtQuota() {
                                logger.Trace("not starting: AtQuota and no unalloc workers")
                                overquota = sorted[i:]
                                break tryrun
                        }
                        if !unallocOK && !availableOK {
                                logger.Trace("not locking: AtCapacity and no unalloc workers")
                                continue
                        }
                        if sch.pool.KillContainer(ctr.UUID, "about to lock") {
                                logger.Info("not locking: crunch-run process from previous attempt has not exited")
                                continue
                        }
                        go sch.lockContainer(logger, ctr.UUID)
                        unalloc[unallocType]--
                case arvados.ContainerStateLocked:
                        if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
                                logger.Tracef("not starting: already at maxConcurrency %d", sch.maxConcurrency)
                                continue
                        }
                        trying++
                        if unallocOK {
                                // We have a suitable instance type,
                                // so mark it as allocated, and try to
                                // start the container.
                                unalloc[unallocType]--
                                logger = logger.WithField("InstanceType", unallocType.Name)
                                if dontstart[unallocType] {
                                        // We already tried & failed to start
                                        // a higher-priority container on the
                                        // same instance type. Don't let this
                                        // one sneak in ahead of it.
                                } else if sch.pool.KillContainer(ctr.UUID, "about to start") {
                                        sorted[i].SchedulingStatus = schedStatusWaitingForPreviousAttempt
                                        logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
                                } else if sch.pool.StartContainer(unallocType, ctr) {
                                        sorted[i].SchedulingStatus = schedStatusPreparingRuntimeEnvironment
                                        logger.Trace("StartContainer => true")
                                } else {
                                        sorted[i].SchedulingStatus = schedStatusWaitingNewInstance
                                        logger.Trace("StartContainer => false")
                                        containerAllocatedWorkerBootingCount += 1
                                        dontstart[unallocType] = true
                                }
                                continue
                        }
                        if sch.pool.AtQuota() {
                                // Don't let lower-priority containers
                                // starve this one by keeping idle
                                // workers alive on different instance
                                // types.
                                logger.Trace("overquota")
                                overquota = sorted[i:]
                                break tryrun
                        }
                        if !availableOK {
                                // Continue trying lower-priority
                                // containers in case they can run on
                                // different instance types that are
                                // available.
                                //
                                // The local "atcapacity" cache helps
                                // when the pool's flag resets after
                                // we look at container A but before
                                // we look at lower-priority container
                                // B. In that case we want to run
                                // container A on the next call to
                                // runQueue(), rather than run
                                // container B now.
                                qpos++
                                sorted[i].SchedulingStatus = schedStatusWaitingInstanceType + fmt.Sprintf(": queue position %d", qpos)
                                logger.Trace("all eligible types at capacity")
                                continue
                        }
                        logger = logger.WithField("InstanceType", availableType.Name)
                        if !sch.pool.Create(availableType) {
                                // Failed despite not being at quota,
                                // e.g., cloud ops throttled.
                                logger.Trace("pool declined to create new instance")
                                continue
                        }
                        // Success. (Note pool.Create works
                        // asynchronously and does its own logging
                        // about the eventual outcome, so we don't
                        // need to.)
                        sorted[i].SchedulingStatus = schedStatusWaitingNewInstance
                        logger.Info("creating new instance")
                        // Don't bother trying to start the container
                        // yet -- obviously the instance will take
                        // some time to boot and become ready.
                        containerAllocatedWorkerBootingCount += 1
                        dontstart[availableType] = true
                }
        }

        sch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))
        sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota) + len(overmaxsuper)))

        var qreason string
        if sch.pool.AtQuota() {
                qreason = schedStatusWaitingCloudResources
        } else {
                qreason = schedStatusWaitingClusterCapacity
        }
        for i, ent := range sorted {
                if ent.SchedulingStatus == "" && (ent.Container.State == arvados.ContainerStateQueued || ent.Container.State == arvados.ContainerStateLocked) {
                        qpos++
                        sorted[i].SchedulingStatus = fmt.Sprintf("%s: queue position %d", qreason, qpos)
                }
        }
        sch.lastQueue.Store(sorted)

        if len(overquota)+len(overmaxsuper) > 0 {
                // Unlock any containers that are unmappable while
                // we're at quota (but if they have already been
                // scheduled and they're loading docker images etc.,
                // let them run).
                var unlock []QueueEnt
                unlock = append(unlock, overmaxsuper...)
                if totalInstances > 0 && len(overquota) > 1 {
                        // We don't unlock the next-in-line container
                        // when at quota.  This avoids a situation
                        // where our "at quota" state expires, we lock
                        // the next container and try to create an
                        // instance, the cloud provider still returns
                        // a quota error, we unlock the container, and
                        // we repeat this until the container reaches
                        // its limit of lock/unlock cycles.
                        unlock = append(unlock, overquota[1:]...)
                } else {
                        // However, if totalInstances is 0 and we're
                        // still getting quota errors, then the
                        // next-in-line container is evidently not
                        // possible to run, so we should let it
                        // exhaust its lock/unlock cycles and
                        // eventually cancel, to avoid starvation.
                        unlock = append(unlock, overquota...)
                }
                for _, ctr := range unlock {
                        ctr := ctr.Container
                        _, toolate := running[ctr.UUID]
                        if ctr.State == arvados.ContainerStateLocked && !toolate {
                                logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
                                logger.Info("unlock because pool capacity is used by higher priority containers")
                                err := sch.queue.Unlock(ctr.UUID)
                                if err != nil {
                                        logger.WithError(err).Warn("error unlocking")
                                }
                        }
                }
        }
        if len(overquota) > 0 {
                // Shut down idle workers that didn't get any
                // containers mapped onto them before we hit quota.
                for it, n := range unalloc {
                        if n < 1 {
                                continue
                        }
                        sch.pool.Shutdown(it)
                }
        }
}

// lockContainer locks the given container. It should be called in a
// new goroutine.
func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
        if !sch.uuidLock(uuid, "lock") {
                return
        }
        defer sch.uuidUnlock(uuid)
        if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
                // This happens if the container has been cancelled or
                // locked since runQueue called sch.queue.Entries(),
                // possibly by a lockContainer() call from a previous
                // runQueue iteration. In any case, we will respond
                // appropriately on the next runQueue iteration, which
                // will have already been triggered by the queue
                // update.
                logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
                return
        }
        err := sch.queue.Lock(uuid)
        if err != nil {
                logger.WithError(err).Warn("error locking container")
                return
        }
        logger.Debug("lock succeeded")
        ctr, ok := sch.queue.Get(uuid)
        if !ok {
                logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
        } else if ctr.State != arvados.ContainerStateLocked {
                logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
        }
}

// uuidLock acquires a non-blocking lock for the specified UUID,
// returning true if successful. The op argument is used only for
// debug logs.
//
// If the lock is not available, uuidLock arranges to wake up the
// scheduler after a short delay, so it can retry whatever operation
// is trying to get the lock (if that operation is still worth doing).
//
// This mechanism helps avoid spamming the controller/database with
// concurrent updates for any single container, even when the
// scheduler loop is running frequently.
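//
// A usage sketch (mirroring lockContainer above):
//
//	if !sch.uuidLock(uuid, "lock") {
//		return
//	}
//	defer sch.uuidUnlock(uuid)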
func (sch *Scheduler) uuidLock(uuid, op string) bool {
        sch.mtx.Lock()
        defer sch.mtx.Unlock()
        logger := sch.logger.WithFields(logrus.Fields{
                "ContainerUUID": uuid,
                "Op":            op,
        })
        if op, locked := sch.uuidOp[uuid]; locked {
                logger.Debugf("uuidLock not available, Op=%s in progress", op)
                // Make sure the scheduler loop wakes up to retry.
                sch.wakeup.Reset(time.Second / 4)
                return false
        }
        logger.Debug("uuidLock acquired")
        sch.uuidOp[uuid] = op
        return true
}

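// uuidUnlock releases the per-UUID lock acquired by uuidLock.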
func (sch *Scheduler) uuidUnlock(uuid string) {
        sch.mtx.Lock()
        defer sch.mtx.Unlock()
        delete(sch.uuidOp, uuid)
}