// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package scheduler

import (
	"sort"
	"time"

	"git.arvados.org/arvados.git/lib/dispatchcloud/container"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"github.com/sirupsen/logrus"
)
var quietAfter503 = time.Minute
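
// runQueue ranks the queued, locked, and running containers, adjusts
// the dynamic concurrency limit based on recent API 503 responses and
// cloud quota, and then locks and starts as many containers as the
// limits and the worker pool allow.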
func (sch *Scheduler) runQueue() {
	running := sch.pool.Running()
	unalloc := sch.pool.Unallocated()

	unsorted, _ := sch.queue.Entries()
	sorted := make([]container.QueueEnt, 0, len(unsorted))
	for _, ent := range unsorted {
		sorted = append(sorted, ent)
	}
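	// Rank candidates: already-running containers first, then
	// already-locked ones, then by descending priority, then in
	// first-seen order.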
	sort.Slice(sorted, func(i, j int) bool {
		_, irunning := running[sorted[i].Container.UUID]
		_, jrunning := running[sorted[j].Container.UUID]
		if irunning != jrunning {
			// Ensure the "tryrun" loop (see below) sees
			// already-scheduled containers first, so
			// existing supervisor containers are
			// properly counted before we decide whether
			// we have room for new ones.
			return irunning
		}
		ilocked := sorted[i].Container.State == arvados.ContainerStateLocked
		jlocked := sorted[j].Container.State == arvados.ContainerStateLocked
		if ilocked != jlocked {
			// Give precedence to containers that we have
			// already locked, even if higher-priority
			// containers have since arrived in the
			// queue. This avoids undesirable queue churn
			// effects including extra lock/unlock cycles
			// and bringing up new instances and quickly
			// shutting them down to make room for
			// different instance sizes.
			return ilocked
		} else if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
			return pi > pj
		} else {
			// When containers have identical priority,
			// start them in the order we first noticed
			// them. This avoids extra lock/unlock cycles
			// when we unlock the containers that don't
			// fit in the available pool.
			return sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)
		}
	})

	if t := sch.client.Last503(); t.After(sch.last503time) {
		// API has sent an HTTP 503 response since last time
		// we checked. Use current #containers - 1 as
		// maxConcurrency, i.e., try to stay just below the
		// level where we see 503s.
		sch.last503time = t
		if newlimit := len(running) - 1; newlimit < 1 {
			sch.maxConcurrency = 1
		} else {
			sch.maxConcurrency = newlimit
		}
	} else if sch.maxConcurrency > 0 && time.Since(sch.last503time) > quietAfter503 {
		// If we haven't seen any 503 errors lately, raise
		// limit to ~10% beyond the current workload.
		//
		// As we use the added 10% to schedule more
		// containers, len(running) will increase and we'll
		// push the limit up further. Soon enough,
		// maxConcurrency will get high enough to schedule the
		// entire queue, hit pool quota, or get 503s again.
		max := len(running)*11/10 + 1
		if sch.maxConcurrency < max {
			sch.maxConcurrency = max
		}
	}
	if sch.last503time.IsZero() {
		sch.mLast503Time.Set(0)
	} else {
		sch.mLast503Time.Set(float64(sch.last503time.Unix()))
	}
	if sch.maxInstances > 0 && sch.maxConcurrency > sch.maxInstances {
		sch.maxConcurrency = sch.maxInstances
	}
	instances := len(running) + len(unalloc)
	if sch.instancesWithinQuota > 0 && sch.instancesWithinQuota < instances {
		// Evidently it is possible to run this many
		// instances, so raise our estimate.
		sch.instancesWithinQuota = instances
	}
	if sch.pool.AtQuota() {
		// Consider current workload to be the maximum
		// allowed, for the sake of reporting metrics and
		// calculating max supervisors.
		//
		// Now that sch.maxConcurrency is set, we will only
		// raise it past len(running) by 10%. This helps
		// avoid running an inappropriate number of
		// supervisors when we reach the cloud-imposed quota
		// (which may be based on # CPUs etc) long before the
		// configured MaxInstances.
		if sch.maxConcurrency == 0 || sch.maxConcurrency > instances {
			if instances == 0 {
				sch.maxConcurrency = 1
			} else {
				sch.maxConcurrency = instances
			}
		}
		sch.instancesWithinQuota = instances
	} else if sch.instancesWithinQuota > 0 && sch.maxConcurrency > sch.instancesWithinQuota+1 {
		// Once we've hit a quota error and started tracking
		// instancesWithinQuota (i.e., it's not zero), we
		// avoid exceeding that known-working level by more
		// than 1.
		//
		// If we don't do this, we risk entering a pattern of
		// repeatedly locking several containers, hitting
		// quota again, and unlocking them again each time the
		// driver stops reporting AtQuota, which tends to use
		// up the max lock/unlock cycles on the next few
		// containers in the queue, and cause them to fail.
		sch.maxConcurrency = sch.instancesWithinQuota + 1
	}
	sch.mMaxContainerConcurrency.Set(float64(sch.maxConcurrency))
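
	// Cap the number of containers flagged as supervisors at the
	// configured fraction of maxConcurrency (but never below 1 when
	// the fraction is nonzero), so supervisors cannot fill the pool
	// and crowd out the containers they are waiting on.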
	maxSupervisors := int(float64(sch.maxConcurrency) * sch.supervisorFraction)
	if maxSupervisors < 1 && sch.supervisorFraction > 0 && sch.maxConcurrency > 0 {
		maxSupervisors = 1
	}

	sch.logger.WithFields(logrus.Fields{
		"Containers":     len(sorted),
		"Processes":      len(running),
		"maxConcurrency": sch.maxConcurrency,
	}).Debug("runQueue")

	dontstart := map[arvados.InstanceType]bool{}
	var overquota []container.QueueEnt    // entries that are unmappable because of worker pool quota
	var overmaxsuper []container.QueueEnt // unmappable because max supervisors (these are not included in overquota)
	var containerAllocatedWorkerBootingCount int

	// trying is #containers running + #containers we're trying to
	// start. We stop trying to start more containers if this
	// reaches the dynamic maxConcurrency limit.
	trying := len(running)

	supervisors := 0

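	// Walk the ranked queue: lock queued containers, create
	// instances and start locked containers, stopping early if we
	// reach maxConcurrency or the cloud quota.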
tryrun:
	for i, ent := range sorted {
		ctr, it := ent.Container, ent.InstanceType
		logger := sch.logger.WithFields(logrus.Fields{
			"ContainerUUID": ctr.UUID,
			"InstanceType":  it.Name,
		})
		if ctr.SchedulingParameters.Supervisor {
			supervisors += 1
			if maxSupervisors > 0 && supervisors > maxSupervisors {
				overmaxsuper = append(overmaxsuper, sorted[i])
				continue
			}
		}
		if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
			continue
		}
		switch ctr.State {
		case arvados.ContainerStateQueued:
			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
				logger.Tracef("not locking: already at maxConcurrency %d", sch.maxConcurrency)
				continue
			}
			trying++
			if unalloc[it] < 1 && sch.pool.AtQuota() {
				logger.Trace("not locking: AtQuota and no unalloc workers")
				overquota = sorted[i:]
				break tryrun
			}
			if sch.pool.KillContainer(ctr.UUID, "about to lock") {
				logger.Info("not locking: crunch-run process from previous attempt has not exited")
				continue
			}
			go sch.lockContainer(logger, ctr.UUID)
			unalloc[it]--
		case arvados.ContainerStateLocked:
			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
				logger.Tracef("not starting: already at maxConcurrency %d", sch.maxConcurrency)
				continue
			}
			trying++
			if unalloc[it] > 0 {
				unalloc[it]--
			} else if sch.pool.AtQuota() {
				// Don't let lower-priority containers
				// starve this one by keeping idle
				// workers alive on different instance
				// types.
				logger.Trace("overquota")
				overquota = sorted[i:]
				break tryrun
			} else if sch.pool.Create(it) {
				// Success. (Note pool.Create works
				// asynchronously and does its own
				// logging about the eventual outcome,
				// so we don't need to.)
				logger.Info("creating new instance")
			} else {
				// Failed despite not being at quota,
				// e.g., cloud ops throttled. TODO:
				// avoid getting starved here if
				// instances of a specific type always
				// fail.
				logger.Trace("pool declined to create new instance")
				continue
			}

			if dontstart[it] {
				// We already tried & failed to start
				// a higher-priority container on the
				// same instance type. Don't let this
				// one sneak in ahead of it.
			} else if sch.pool.KillContainer(ctr.UUID, "about to start") {
				logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
			} else if sch.pool.StartContainer(it, ctr) {
				logger.Trace("StartContainer => true")
			} else {
				logger.Trace("StartContainer => false")
				containerAllocatedWorkerBootingCount += 1
				dontstart[it] = true
			}
		}
	}

	sch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))
	sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota) + len(overmaxsuper)))

	if len(overquota)+len(overmaxsuper) > 0 {
		// Unlock any containers that are unmappable while
		// we're at quota (but if they have already been
		// scheduled and they're loading docker images etc.,
		// let them run).
		for _, ctr := range append(overmaxsuper, overquota...) {
			ctr := ctr.Container
			_, toolate := running[ctr.UUID]
			if ctr.State == arvados.ContainerStateLocked && !toolate {
				logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
				logger.Info("unlock because pool capacity is used by higher priority containers")
				err := sch.queue.Unlock(ctr.UUID)
				if err != nil {
					logger.WithError(err).Warn("error unlocking")
				}
			}
		}
	}
	if len(overquota) > 0 {
		// Shut down idle workers that didn't get any
		// containers mapped onto them before we hit quota.
		for it, n := range unalloc {
			if n < 1 {
				continue
			}
			sch.pool.Shutdown(it)
		}
	}
}

// Lock the given container. Should be called in a new goroutine.
func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
	if !sch.uuidLock(uuid, "lock") {
		return
	}
	defer sch.uuidUnlock(uuid)
	if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
		// This happens if the container has been cancelled or
		// locked since runQueue called sch.queue.Entries(),
		// possibly by a lockContainer() call from a previous
		// runQueue iteration. In any case, we will respond
		// appropriately on the next runQueue iteration, which
		// will have already been triggered by the queue
		// update.
		logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
		return
	}
	err := sch.queue.Lock(uuid)
	if err != nil {
		logger.WithError(err).Warn("error locking container")
		return
	}
	logger.Debug("lock succeeded")
	ctr, ok := sch.queue.Get(uuid)
	if !ok {
		logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
	} else if ctr.State != arvados.ContainerStateLocked {
		logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
	}
}

// Acquire a non-blocking lock for specified UUID, returning true if
// successful. The op argument is used only for debug logs.
//
// If the lock is not available, uuidLock arranges to wake up the
// scheduler after a short delay, so it can retry whatever operation
// is trying to get the lock (if that operation is still worth doing).
//
// This mechanism helps avoid spamming the controller/database with
// concurrent updates for any single container, even when the
// scheduler loop is running frequently.
func (sch *Scheduler) uuidLock(uuid, op string) bool {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	logger := sch.logger.WithFields(logrus.Fields{
		"ContainerUUID": uuid,
		"Op":            op,
	})
	if op, locked := sch.uuidOp[uuid]; locked {
		logger.Debugf("uuidLock not available, Op=%s in progress", op)
		// Make sure the scheduler loop wakes up to retry.
		sch.wakeup.Reset(time.Second / 4)
		return false
	}
	logger.Debug("uuidLock acquired")
	sch.uuidOp[uuid] = op
	return true
}

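// uuidUnlock releases the per-container lock acquired by uuidLock.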
func (sch *Scheduler) uuidUnlock(uuid string) {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	delete(sch.uuidOp, uuid)
}