// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package scheduler

import (
	"fmt"
	"sort"
	"time"

	"git.arvados.org/arvados.git/lib/dispatchcloud/container"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"github.com/sirupsen/logrus"
)

var quietAfter503 = time.Minute

type QueueEnt struct {
	container.QueueEnt

	// Human-readable scheduling status as of the last scheduling
	// iteration.
	SchedulingStatus string `json:"scheduling_status"`
}

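// Human-readable values for QueueEnt.SchedulingStatus, assigned
// during each scheduling iteration.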
const (
	schedStatusPreparingRuntimeEnvironment = "preparing runtime environment"
	schedStatusPriorityZero                = "not scheduling: priority 0" // ", state X" appended at runtime
	schedStatusContainerLimitReached       = "not starting: supervisor container limit has been reached"
	schedStatusWaitingForPreviousAttempt   = "waiting for previous attempt to exit"
	schedStatusWaitingNewInstance          = "waiting for new instance to be ready"
	schedStatusWaitingInstanceType         = "waiting for suitable instance type to become available" // ": queue position X" appended at runtime
	schedStatusWaitingCloudResources       = "waiting for cloud resources"
	schedStatusWaitingClusterCapacity      = "waiting while cluster is running at capacity" // ": queue position X" appended at runtime
)

// Queue returns the sorted queue from the last scheduling iteration.
func (sch *Scheduler) Queue() []QueueEnt {
	ents, _ := sch.lastQueue.Load().([]QueueEnt)
	return ents
}

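// runQueue performs a single scheduling pass: it sorts the queue,
// adjusts the dynamic maxConcurrency limit based on recent 503
// responses and quota/capacity signals, then locks, starts, or
// unlocks containers as appropriate.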
func (sch *Scheduler) runQueue() {
	running := sch.pool.Running()
	unalloc := sch.pool.Unallocated()

	totalInstances := 0
	for _, n := range sch.pool.CountWorkers() {
		totalInstances += n
	}

	unsorted, _ := sch.queue.Entries()
	sorted := make([]QueueEnt, 0, len(unsorted))
	for _, ent := range unsorted {
		sorted = append(sorted, QueueEnt{QueueEnt: ent})
	}
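	// Sort order: containers that are already running come first,
	// then locked containers, then higher priority before lower, and
	// ties are broken by the time we first saw each container.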
	sort.Slice(sorted, func(i, j int) bool {
		_, irunning := running[sorted[i].Container.UUID]
		_, jrunning := running[sorted[j].Container.UUID]
		if irunning != jrunning {
			// Ensure the "tryrun" loop (see below) sees
			// already-scheduled containers first, so that
			// existing supervisor containers are properly
			// counted before we decide whether we have
			// room for new ones.
			return irunning
		}
		ilocked := sorted[i].Container.State == arvados.ContainerStateLocked
		jlocked := sorted[j].Container.State == arvados.ContainerStateLocked
		if ilocked != jlocked {
			// Give precedence to containers that we have
			// already locked, even if higher-priority
			// containers have since arrived in the
			// queue. This avoids undesirable queue churn
			// effects, including extra lock/unlock cycles
			// and bringing up new instances only to shut
			// them down quickly to make room for
			// different instance sizes.
			return ilocked
		} else if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
			return pi > pj
		} else {
			// When containers have identical priority,
			// start them in the order we first noticed
			// them. This avoids extra lock/unlock cycles
			// when we unlock the containers that don't
			// fit in the available pool.
			return sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)
		}
	})

	if t := sch.client.Last503(); t.After(sch.last503time) {
		// API has sent an HTTP 503 response since last time
		// we checked. Use current #containers - 1 as
		// maxConcurrency, i.e., try to stay just below the
		// level where we see 503s.
		sch.last503time = t
		if newlimit := len(running) - 1; newlimit < 1 {
			sch.maxConcurrency = 1
		} else {
			sch.maxConcurrency = newlimit
		}
	} else if sch.maxConcurrency > 0 && time.Since(sch.last503time) > quietAfter503 {
		// If we haven't seen any 503 errors lately, raise
		// limit to ~10% beyond the current workload.
		//
		// As we use the added 10% to schedule more
		// containers, len(running) will increase and we'll
		// push the limit up further. Soon enough,
		// maxConcurrency will get high enough to schedule the
		// entire queue, hit pool quota, or get 503s again.
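		//
		// For example, with 20 containers running, the new
		// limit would be 20*11/10 + 1 = 23.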
		max := len(running)*11/10 + 1
		if sch.maxConcurrency < max {
			sch.maxConcurrency = max
		}
	}
	if sch.last503time.IsZero() {
		sch.mLast503Time.Set(0)
	} else {
		sch.mLast503Time.Set(float64(sch.last503time.Unix()))
	}
	if sch.maxInstances > 0 && sch.maxConcurrency > sch.maxInstances {
		sch.maxConcurrency = sch.maxInstances
	}
	if sch.instancesWithinQuota > 0 && sch.instancesWithinQuota < totalInstances {
		// Evidently it is possible to run this many
		// instances, so raise our estimate.
		sch.instancesWithinQuota = totalInstances
	}
	if sch.pool.AtQuota() {
		// Consider current workload to be the maximum
		// allowed, for the sake of reporting metrics and
		// calculating max supervisors.
		//
		// Now that sch.maxConcurrency is set, we will only
		// raise it past len(running) by 10%. This helps
		// avoid running an inappropriate number of
		// supervisors when we reach the cloud-imposed quota
		// (which may be based on # CPUs etc.) long before the
		// configured MaxInstances.
		if sch.maxConcurrency == 0 || sch.maxConcurrency > totalInstances {
			if totalInstances == 0 {
				sch.maxConcurrency = 1
			} else {
				sch.maxConcurrency = totalInstances
			}
		}
		sch.instancesWithinQuota = totalInstances
	} else if sch.instancesWithinQuota > 0 && sch.maxConcurrency > sch.instancesWithinQuota+1 {
		// Once we've hit a quota error and started tracking
		// instancesWithinQuota (i.e., it's not zero), we
		// avoid exceeding that known-working level by more
		// than 1.
		//
		// If we don't do this, we risk entering a pattern of
		// repeatedly locking several containers, hitting
		// quota again, and unlocking them again each time the
		// driver stops reporting AtQuota, which tends to use
		// up the max lock/unlock cycles on the next few
		// containers in the queue and cause them to fail.
		sch.maxConcurrency = sch.instancesWithinQuota + 1
	}
	sch.mMaxContainerConcurrency.Set(float64(sch.maxConcurrency))

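	// maxSupervisors caps the number of supervisor containers
	// (e.g., workflow runners) allowed to run at once; for example,
	// maxConcurrency 100 with supervisorFraction 0.25 allows 25.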
	maxSupervisors := int(float64(sch.maxConcurrency) * sch.supervisorFraction)
	if maxSupervisors < 1 && sch.supervisorFraction > 0 && sch.maxConcurrency > 0 {
		maxSupervisors = 1
	}

	sch.logger.WithFields(logrus.Fields{
		"Containers":     len(sorted),
		"Processes":      len(running),
		"maxConcurrency": sch.maxConcurrency,
	}).Debug("runQueue")

	dontstart := map[arvados.InstanceType]bool{} // don't start lower-priority containers on these instance types during this pass
	var atcapacity = map[string]bool{}           // ProviderTypes reported as AtCapacity during this runQueue() invocation
	var overquota []QueueEnt                     // entries that are unmappable because of worker pool quota
	var overmaxsuper []QueueEnt                  // unmappable because of the max supervisor limit (not included in overquota)
	var containerAllocatedWorkerBootingCount int

	// trying is #containers running + #containers we're trying to
	// start. We stop trying to start more containers if this
	// reaches the dynamic maxConcurrency limit.
	trying := len(running)

	qpos := 0
	supervisors := 0

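	// Walk the sorted queue ("tryrun" loop), locking and starting
	// containers until we run out of candidates or hit the
	// maxConcurrency, quota, or capacity limits.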
tryrun:
	for i, ent := range sorted {
		ctr, types := ent.Container, ent.InstanceTypes
		logger := sch.logger.WithFields(logrus.Fields{
			"ContainerUUID": ctr.UUID,
		})
		if ctr.SchedulingParameters.Supervisor {
			supervisors += 1
		}
		if _, running := running[ctr.UUID]; running {
			if ctr.State == arvados.ContainerStateQueued || ctr.State == arvados.ContainerStateLocked {
				sorted[i].SchedulingStatus = schedStatusPreparingRuntimeEnvironment
			}
			continue
		}
		if ctr.Priority < 1 {
			sorted[i].SchedulingStatus = schedStatusPriorityZero + ", state " + string(ctr.State)
			continue
		}
		if ctr.SchedulingParameters.Supervisor && maxSupervisors > 0 && supervisors > maxSupervisors {
			overmaxsuper = append(overmaxsuper, sorted[i])
			sorted[i].SchedulingStatus = schedStatusContainerLimitReached
			continue
		}
		// If we have unalloc instances of any of the eligible
		// instance types, unallocOK is true and unallocType
		// is the lowest-cost type.
		var unallocOK bool
		var unallocType arvados.InstanceType
		for _, it := range types {
			if unalloc[it] > 0 {
				unallocOK = true
				unallocType = it
				break
			}
		}
		// If the pool is not reporting AtCapacity for any of
		// the eligible instance types, availableOK is true
		// and availableType is the lowest-cost type.
		var availableOK bool
		var availableType arvados.InstanceType
		for _, it := range types {
			if atcapacity[it.ProviderType] {
				continue
			} else if sch.pool.AtCapacity(it) {
				atcapacity[it.ProviderType] = true
				continue
			} else {
				availableOK = true
				availableType = it
				break
			}
		}
		switch ctr.State {
		case arvados.ContainerStateQueued:
			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
				logger.Tracef("not locking: already at maxConcurrency %d", sch.maxConcurrency)
				continue
			}
			trying++
			if !unallocOK && sch.pool.AtQuota() {
				logger.Trace("not starting: AtQuota and no unalloc workers")
				overquota = sorted[i:]
				break tryrun
			}
			if !unallocOK && !availableOK {
				logger.Trace("not locking: AtCapacity and no unalloc workers")
				continue
			}
			if sch.pool.KillContainer(ctr.UUID, "about to lock") {
				logger.Info("not locking: crunch-run process from previous attempt has not exited")
				continue
			}
			go sch.lockContainer(logger, ctr.UUID)
			unalloc[unallocType]--
		case arvados.ContainerStateLocked:
			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
				logger.Tracef("not starting: already at maxConcurrency %d", sch.maxConcurrency)
				continue
			}
			trying++
			if unallocOK {
				// We have a suitable instance type,
				// so mark it as allocated, and try to
				// start the container.
				unalloc[unallocType]--
				logger = logger.WithField("InstanceType", unallocType.Name)
				if dontstart[unallocType] {
					// We already tried & failed to start
					// a higher-priority container on the
					// same instance type. Don't let this
					// one sneak in ahead of it.
				} else if sch.pool.KillContainer(ctr.UUID, "about to start") {
					sorted[i].SchedulingStatus = schedStatusWaitingForPreviousAttempt
					logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
				} else if sch.pool.StartContainer(unallocType, ctr) {
					sorted[i].SchedulingStatus = schedStatusPreparingRuntimeEnvironment
					logger.Trace("StartContainer => true")
				} else {
					sorted[i].SchedulingStatus = schedStatusWaitingNewInstance
					logger.Trace("StartContainer => false")
					containerAllocatedWorkerBootingCount += 1
					dontstart[unallocType] = true
				}
				continue
			}
			if sch.pool.AtQuota() {
				// Don't let lower-priority containers
				// starve this one by keeping idle
				// workers alive on different instance
				// types.
				logger.Trace("overquota")
				overquota = sorted[i:]
				break tryrun
			}
			if !availableOK {
				// Continue trying lower-priority
				// containers in case they can run on
				// different instance types that are
				// available.
				//
				// The local "atcapacity" cache helps
				// when the pool's flag resets after
				// we look at container A but before
				// we look at lower-priority container
				// B. In that case we want to run
				// container A on the next call to
				// runQueue(), rather than run
				// container B now.
				qpos++
				sorted[i].SchedulingStatus = schedStatusWaitingInstanceType + fmt.Sprintf(": queue position %d", qpos)
				logger.Trace("all eligible types at capacity")
				continue
			}
			logger = logger.WithField("InstanceType", availableType.Name)
			if !sch.pool.Create(availableType) {
				// Failed despite not being at quota,
				// e.g., cloud ops throttled.
				logger.Trace("pool declined to create new instance")
				continue
			}
			// Success. (Note pool.Create works
			// asynchronously and does its own logging
			// about the eventual outcome, so we don't
			// need to.)
			sorted[i].SchedulingStatus = schedStatusWaitingNewInstance
			logger.Info("creating new instance")
			// Don't bother trying to start the container
			// yet -- obviously the instance will take
			// some time to boot and become ready.
			containerAllocatedWorkerBootingCount += 1
			dontstart[availableType] = true
		}
	}

	sch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))
	sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota) + len(overmaxsuper)))

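	// Annotate any remaining queued/locked containers with a
	// human-readable reason and queue position, so Queue() callers
	// can see why they haven't been scheduled yet.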
	var qreason string
	if sch.pool.AtQuota() {
		qreason = schedStatusWaitingCloudResources
	} else {
		qreason = schedStatusWaitingClusterCapacity
	}
	for i, ent := range sorted {
		if ent.SchedulingStatus == "" && (ent.Container.State == arvados.ContainerStateQueued || ent.Container.State == arvados.ContainerStateLocked) {
			qpos++
			sorted[i].SchedulingStatus = fmt.Sprintf("%s: queue position %d", qreason, qpos)
		}
	}
	sch.lastQueue.Store(sorted)

	if len(overquota)+len(overmaxsuper) > 0 {
		// Unlock any containers that are unmappable while
		// we're at quota (but if they have already been
		// scheduled and they're loading docker images etc.,
		// let them run).
		var unlock []QueueEnt
		unlock = append(unlock, overmaxsuper...)
		if totalInstances > 0 && len(overquota) > 1 {
			// We don't unlock the next-in-line container
			// when at quota. This avoids a situation
			// where our "at quota" state expires, we lock
			// the next container and try to create an
			// instance, the cloud provider still returns
			// a quota error, we unlock the container, and
			// we repeat this until the container reaches
			// its limit of lock/unlock cycles.
			unlock = append(unlock, overquota[1:]...)
		} else {
			// However, if totalInstances is 0 and we're
			// still getting quota errors, then the
			// next-in-line container is evidently not
			// possible to run, so we should let it
			// exhaust its lock/unlock cycles and
			// eventually cancel, to avoid starvation.
			unlock = append(unlock, overquota...)
		}
		for _, ctr := range unlock {
			ctr := ctr.Container
			_, toolate := running[ctr.UUID]
			if ctr.State == arvados.ContainerStateLocked && !toolate {
				logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
				logger.Info("unlock because pool capacity is used by higher priority containers")
				err := sch.queue.Unlock(ctr.UUID)
				if err != nil {
					logger.WithError(err).Warn("error unlocking")
				}
			}
		}
	}
	if len(overquota) > 0 {
		// Shut down idle workers that didn't get any
		// containers mapped onto them before we hit quota.
		for it, n := range unalloc {
			if n < 1 {
				continue
			}
			sch.pool.Shutdown(it)
		}
	}
}

// Lock the given container. Should be called in a new goroutine.
func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
	if !sch.uuidLock(uuid, "lock") {
		return
	}
	defer sch.uuidUnlock(uuid)
	if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
		// This happens if the container has been cancelled or
		// locked since runQueue called sch.queue.Entries(),
		// possibly by a lockContainer() call from a previous
		// runQueue iteration. In any case, we will respond
		// appropriately on the next runQueue iteration, which
		// will have already been triggered by the queue
		// update.
		logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
		return
	}
	err := sch.queue.Lock(uuid)
	if err != nil {
		logger.WithError(err).Warn("error locking container")
		return
	}
	logger.Debug("lock succeeded")
	ctr, ok := sch.queue.Get(uuid)
	if !ok {
		logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
	} else if ctr.State != arvados.ContainerStateLocked {
		logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
	}
}

// uuidLock acquires a non-blocking lock for the specified UUID,
// returning true if successful. The op argument is used only for
// debug logs.
//
// If the lock is not available, uuidLock arranges to wake up the
// scheduler after a short delay, so it can retry whatever operation
// is trying to get the lock (if that operation is still worth doing).
//
// This mechanism helps avoid spamming the controller/database with
// concurrent updates for any single container, even when the
// scheduler loop is running frequently.
func (sch *Scheduler) uuidLock(uuid, op string) bool {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	logger := sch.logger.WithFields(logrus.Fields{
		"ContainerUUID": uuid,
		"Op":            op,
	})
	if op, locked := sch.uuidOp[uuid]; locked {
		logger.Debugf("uuidLock not available, Op=%s in progress", op)
		// Make sure the scheduler loop wakes up to retry.
		sch.wakeup.Reset(time.Second / 4)
		return false
	}
	logger.Debug("uuidLock acquired")
	sch.uuidOp[uuid] = op
	return true
}

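// uuidUnlock releases the per-container lock acquired by uuidLock.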
func (sch *Scheduler) uuidUnlock(uuid string) {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	delete(sch.uuidOp, uuid)
}