// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package scheduler

import (
	"sort"
	"time"

	"git.arvados.org/arvados.git/lib/dispatchcloud/container"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"github.com/sirupsen/logrus"
)

var quietAfter503 = time.Minute

func (sch *Scheduler) runQueue() {
	running := sch.pool.Running()
	unalloc := sch.pool.Unallocated()

	totalInstances := 0
	for _, n := range sch.pool.CountWorkers() {
		totalInstances += n
	}

	unsorted, _ := sch.queue.Entries()
	sorted := make([]container.QueueEnt, 0, len(unsorted))
	for _, ent := range unsorted {
		sorted = append(sorted, ent)
	}
	sort.Slice(sorted, func(i, j int) bool {
		_, irunning := running[sorted[i].Container.UUID]
		_, jrunning := running[sorted[j].Container.UUID]
		if irunning != jrunning {
			// Ensure the "tryrun" loop (see below) sees
			// already-scheduled containers first, so that
			// existing supervisor containers are properly
			// counted before we decide whether we have
			// room for new ones.
			return irunning
		}
		ilocked := sorted[i].Container.State == arvados.ContainerStateLocked
		jlocked := sorted[j].Container.State == arvados.ContainerStateLocked
		if ilocked != jlocked {
			// Give precedence to containers that we have
			// already locked, even if higher-priority
			// containers have since arrived in the
			// queue. This avoids undesirable queue churn
			// effects including extra lock/unlock cycles
			// and bringing up new instances and quickly
			// shutting them down to make room for
			// different instance sizes.
			return ilocked
		} else if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
			// Higher priority first.
			return pi > pj
		} else {
			// When containers have identical priority,
			// start them in the order we first noticed
			// them. This avoids extra lock/unlock cycles
			// when we unlock the containers that don't
			// fit in the available pool.
			return sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)
		}
	})

	if t := sch.client.Last503(); t.After(sch.last503time) {
		// API has sent an HTTP 503 response since last time
		// we checked. Use current #containers - 1 as
		// maxConcurrency, i.e., try to stay just below the
		// level where we see 503s.
		sch.last503time = t
		if newlimit := len(running) - 1; newlimit < 1 {
			sch.maxConcurrency = 1
		} else {
			sch.maxConcurrency = newlimit
		}
	} else if sch.maxConcurrency > 0 && time.Since(sch.last503time) > quietAfter503 {
		// If we haven't seen any 503 errors lately, raise
		// limit to ~10% beyond the current workload.
		//
		// As we use the added 10% to schedule more
		// containers, len(running) will increase and we'll
		// push the limit up further. Soon enough,
		// maxConcurrency will get high enough to schedule the
		// entire queue, hit pool quota, or get 503s again.
		max := len(running)*11/10 + 1
		if sch.maxConcurrency < max {
			sch.maxConcurrency = max
		}
	}
	if sch.last503time.IsZero() {
		sch.mLast503Time.Set(0)
	} else {
		sch.mLast503Time.Set(float64(sch.last503time.Unix()))
	}
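	// Never raise maxConcurrency above the operator-configured
	// MaxInstances limit.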
	if sch.maxInstances > 0 && sch.maxConcurrency > sch.maxInstances {
		sch.maxConcurrency = sch.maxInstances
	}
	if sch.instancesWithinQuota > 0 && sch.instancesWithinQuota < totalInstances {
		// Evidently it is possible to run this many
		// instances, so raise our estimate.
		sch.instancesWithinQuota = totalInstances
	}
	if sch.pool.AtQuota() {
		// Consider current workload to be the maximum
		// allowed, for the sake of reporting metrics and
		// calculating max supervisors.
		//
		// Now that sch.maxConcurrency is set, we will only
		// raise it past len(running) by 10%. This helps
		// avoid running an inappropriate number of
		// supervisors when we reach the cloud-imposed quota
		// (which may be based on # CPUs etc) long before the
		// configured MaxInstances.
		if sch.maxConcurrency == 0 || sch.maxConcurrency > totalInstances {
			if totalInstances == 0 {
				sch.maxConcurrency = 1
			} else {
				sch.maxConcurrency = totalInstances
			}
		}
		sch.instancesWithinQuota = totalInstances
	} else if sch.instancesWithinQuota > 0 && sch.maxConcurrency > sch.instancesWithinQuota+1 {
		// Once we've hit a quota error and started tracking
		// instancesWithinQuota (i.e., it's not zero), we
		// avoid exceeding that known-working level by more
		// than 1.
		//
		// If we don't do this, we risk entering a pattern of
		// repeatedly locking several containers, hitting
		// quota again, and unlocking them again each time the
		// driver stops reporting AtQuota, which tends to use
		// up the max lock/unlock cycles on the next few
		// containers in the queue, and cause them to fail.
		sch.maxConcurrency = sch.instancesWithinQuota + 1
	}
	sch.mMaxContainerConcurrency.Set(float64(sch.maxConcurrency))

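	// Cap the number of concurrently scheduled supervisor
	// containers (e.g., workflow runners) at the configured
	// fraction of maxConcurrency, so supervisors alone cannot
	// occupy every available slot.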
	maxSupervisors := int(float64(sch.maxConcurrency) * sch.supervisorFraction)
	if maxSupervisors < 1 && sch.supervisorFraction > 0 && sch.maxConcurrency > 0 {
		maxSupervisors = 1
	}

	sch.logger.WithFields(logrus.Fields{
		"Containers":     len(sorted),
		"Processes":      len(running),
		"maxConcurrency": sch.maxConcurrency,
	}).Debug("runQueue")

	dontstart := map[arvados.InstanceType]bool{}
	var atcapacity = map[string]bool{}    // ProviderTypes reported as AtCapacity during this runQueue() invocation
	var overquota []container.QueueEnt    // entries that are unmappable because of worker pool quota
	var overmaxsuper []container.QueueEnt // unmappable because max supervisors (these are not included in overquota)
	var containerAllocatedWorkerBootingCount int

	// trying is #containers running + #containers we're trying to
	// start. We stop trying to start more containers if this
	// reaches the dynamic maxConcurrency limit.
	trying := len(running)

	supervisors := 0

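	// The "tryrun" loop below walks the queue in the order
	// established above, locking queued containers and starting
	// locked ones, until we run out of capacity, quota, or
	// concurrency headroom.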
tryrun:
	for i, ent := range sorted {
		ctr, it := ent.Container, ent.InstanceType
		logger := sch.logger.WithFields(logrus.Fields{
			"ContainerUUID": ctr.UUID,
			"InstanceType":  it.Name,
		})
		if ctr.SchedulingParameters.Supervisor {
			supervisors += 1
			if maxSupervisors > 0 && supervisors > maxSupervisors {
				overmaxsuper = append(overmaxsuper, sorted[i])
				continue
			}
		}
		if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
			continue
		}
		switch ctr.State {
		case arvados.ContainerStateQueued:
			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
				logger.Tracef("not locking: already at maxConcurrency %d", sch.maxConcurrency)
				continue
			}
			trying++
			if unalloc[it] < 1 && sch.pool.AtQuota() {
				logger.Trace("not locking: AtQuota and no unalloc workers")
				overquota = sorted[i:]
				break tryrun
			}
			if unalloc[it] < 1 && (atcapacity[it.ProviderType] || sch.pool.AtCapacity(it)) {
				logger.Trace("not locking: AtCapacity and no unalloc workers")
				atcapacity[it.ProviderType] = true
				continue
			}
			if sch.pool.KillContainer(ctr.UUID, "about to lock") {
				logger.Info("not locking: crunch-run process from previous attempt has not exited")
				continue
			}
			go sch.lockContainer(logger, ctr.UUID)
			unalloc[it]--
		case arvados.ContainerStateLocked:
			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
				logger.Tracef("not starting: already at maxConcurrency %d", sch.maxConcurrency)
				continue
			}
			trying++
			if unalloc[it] > 0 {
				unalloc[it]--
			} else if sch.pool.AtQuota() {
				// Don't let lower-priority containers
				// starve this one by keeping idle
				// workers alive on different instance
				// types.
				logger.Trace("overquota")
				overquota = sorted[i:]
				break tryrun
			} else if atcapacity[it.ProviderType] || sch.pool.AtCapacity(it) {
				// Continue trying lower-priority
				// containers in case they can run on
				// different instance types that are
				// available.
				//
				// The local "atcapacity" cache helps
				// when the pool's flag resets after
				// we look at container A but before
				// we look at lower-priority container
				// B. In that case we want to run
				// container A on the next call to
				// runQueue(), rather than run
				// container B now.
				//
				// TODO: try running this container on
				// a bigger (but not much more
				// expensive) instance type.
				logger.WithField("InstanceType", it.Name).Trace("at capacity")
				atcapacity[it.ProviderType] = true
				continue
			} else if sch.pool.Create(it) {
				// Success. (Note pool.Create works
				// asynchronously and does its own
				// logging about the eventual outcome,
				// so we don't need to.)
				logger.Info("creating new instance")
			} else {
				// Failed despite not being at quota,
				// e.g., cloud ops throttled.
				logger.Trace("pool declined to create new instance")
				continue
			}

			if dontstart[it] {
				// We already tried & failed to start
				// a higher-priority container on the
				// same instance type. Don't let this
				// one sneak in ahead of it.
			} else if sch.pool.KillContainer(ctr.UUID, "about to start") {
				logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
			} else if sch.pool.StartContainer(it, ctr) {
				logger.Trace("StartContainer => true")
			} else {
				logger.Trace("StartContainer => false")
				containerAllocatedWorkerBootingCount += 1
				dontstart[it] = true
			}
		}
	}

	sch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))
	sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota) + len(overmaxsuper)))

	if len(overquota)+len(overmaxsuper) > 0 {
		// Unlock any containers that are unmappable while
		// we're at quota (but if they have already been
		// scheduled and they're loading docker images etc.,
		// let them run).
		var unlock []container.QueueEnt
		unlock = append(unlock, overmaxsuper...)
		if totalInstances > 0 && len(overquota) > 1 {
			// We don't unlock the next-in-line container
			// when at quota. This avoids a situation
			// where our "at quota" state expires, we lock
			// the next container and try to create an
			// instance, the cloud provider still returns
			// a quota error, we unlock the container, and
			// we repeat this until the container reaches
			// its limit of lock/unlock cycles.
			unlock = append(unlock, overquota[1:]...)
		} else {
			// However, if totalInstances is 0 and we're
			// still getting quota errors, then the
			// next-in-line container is evidently not
			// possible to run, so we should let it
			// exhaust its lock/unlock cycles and
			// eventually cancel, to avoid starvation.
			unlock = append(unlock, overquota...)
		}
		for _, ctr := range unlock {
			ctr := ctr.Container
			_, toolate := running[ctr.UUID]
			if ctr.State == arvados.ContainerStateLocked && !toolate {
				logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
				logger.Info("unlock because pool capacity is used by higher priority containers")
				err := sch.queue.Unlock(ctr.UUID)
				if err != nil {
					logger.WithError(err).Warn("error unlocking")
				}
			}
		}
	}

	if len(overquota) > 0 {
		// Shut down idle workers that didn't get any
		// containers mapped onto them before we hit quota.
		for it, n := range unalloc {
			if n < 1 {
				continue
			}
			sch.pool.Shutdown(it)
		}
	}
}

// Lock the given container. Should be called in a new goroutine.
func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
	if !sch.uuidLock(uuid, "lock") {
		return
	}
	defer sch.uuidUnlock(uuid)
	if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
		// This happens if the container has been cancelled or
		// locked since runQueue called sch.queue.Entries(),
		// possibly by a lockContainer() call from a previous
		// runQueue iteration. In any case, we will respond
		// appropriately on the next runQueue iteration, which
		// will have already been triggered by the queue
		// update.
		logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
		return
	}
	err := sch.queue.Lock(uuid)
	if err != nil {
		logger.WithError(err).Warn("error locking container")
		return
	}
	logger.Debug("lock succeeded")
	ctr, ok := sch.queue.Get(uuid)
	if !ok {
		logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
	} else if ctr.State != arvados.ContainerStateLocked {
		logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
	}
}

// Acquire a non-blocking lock for specified UUID, returning true if
// successful. The op argument is used only for debug logs.
//
// If the lock is not available, uuidLock arranges to wake up the
// scheduler after a short delay, so it can retry whatever operation
// is trying to get the lock (if that operation is still worth doing).
//
// This mechanism helps avoid spamming the controller/database with
// concurrent updates for any single container, even when the
// scheduler loop is running frequently.
func (sch *Scheduler) uuidLock(uuid, op string) bool {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	logger := sch.logger.WithFields(logrus.Fields{
		"ContainerUUID": uuid,
		"Op":            op,
	})
	if op, locked := sch.uuidOp[uuid]; locked {
		logger.Debugf("uuidLock not available, Op=%s in progress", op)
		// Make sure the scheduler loop wakes up to retry.
		sch.wakeup.Reset(time.Second / 4)
		return false
	}
	logger.Debug("uuidLock acquired")
	sch.uuidOp[uuid] = op
	return true
}

func (sch *Scheduler) uuidUnlock(uuid string) {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	delete(sch.uuidOp, uuid)
}