// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package scheduler

import (
	"sort"
	"time"

	"git.arvados.org/arvados.git/lib/dispatchcloud/container"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"github.com/sirupsen/logrus"
)

// quietAfter503 is how long to wait after the most recent 503
// response before allowing maxConcurrency to rise again.
var quietAfter503 = time.Minute

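// isSupervisor reports whether ctr is a workflow runner: either its
// command starts with arvados-cwl-runner, or its scheduling
// parameters explicitly mark it as a supervisor.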
func isSupervisor(ctr arvados.Container) bool {
	return (len(ctr.Command) > 0 && ctr.Command[0] == "arvados-cwl-runner") || ctr.SchedulingParameters.Supervisor
}

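// runQueue makes a single scheduling pass: it sorts the queued
// containers by priority, adjusts maxConcurrency based on recent 503
// responses from the API server, and locks or starts as many
// containers as the worker pool and quota allow.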
func (sch *Scheduler) runQueue() {
	unsorted, _ := sch.queue.Entries()
	sorted := make([]container.QueueEnt, 0, len(unsorted))
	for _, ent := range unsorted {
		sorted = append(sorted, ent)
	}
	sort.Slice(sorted, func(i, j int) bool {
		if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
			// Higher priority first.
			return pi > pj
		} else {
			// When containers have identical priority,
			// start them in the order we first noticed
			// them. This avoids extra lock/unlock cycles
			// when we unlock the containers that don't
			// fit in the available pool.
			return sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)
		}
	})

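	// Snapshot the worker pool: running is keyed by container
	// UUID; unalloc counts workers of each instance type that do
	// not have a container allocated yet.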
	running := sch.pool.Running()
	unalloc := sch.pool.Unallocated()

	if t := sch.client.Last503(); t.After(sch.last503time) {
		// API has sent an HTTP 503 response since last time
		// we checked. Use current #containers - 1 as
		// maxConcurrency, i.e., try to stay just below the
		// level where we see 503s.
		sch.last503time = t
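		// For example, with 10 containers running the new
		// limit is 9; with 0 or 1 running it is clamped to 1.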
		if newlimit := len(running) - 1; newlimit < 1 {
			sch.maxConcurrency = 1
		} else {
			sch.maxConcurrency = newlimit
		}
	} else if sch.maxConcurrency > 0 && time.Since(sch.last503time) > quietAfter503 {
		// If we haven't seen any 503 errors lately, raise
		// limit to ~10% beyond the current workload.
		//
		// As we use the added 10% to schedule more
		// containers, len(running) will increase and we'll
		// push the limit up further. Soon enough,
		// maxConcurrency will get high enough to schedule the
		// entire queue, hit pool quota, or get 503s again.
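		// For example, with 40 containers running the limit
		// becomes 40*11/10+1 = 45.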
		max := len(running)*11/10 + 1
		if sch.maxConcurrency < max {
			sch.maxConcurrency = max
		}
	}
	if sch.last503time.IsZero() {
		sch.mLast503Time.Set(0)
	} else {
		sch.mLast503Time.Set(float64(sch.last503time.Unix()))
	}
	sch.mMaxContainerConcurrency.Set(float64(sch.maxConcurrency))

	sch.logger.WithFields(logrus.Fields{
		"Containers":     len(sorted),
		"Processes":      len(running),
		"maxConcurrency": sch.maxConcurrency,
	}).Debug("runQueue")

	dontstart := map[arvados.InstanceType]bool{}
	var overquota []container.QueueEnt // entries that are unmappable because of worker pool quota
	var containerAllocatedWorkerBootingCount int

	// trying is #containers running + #containers we're trying to
	// start. We stop trying to start more containers if this
	// reaches the dynamic maxConcurrency limit.
	trying := len(running)

	// supervisors counts the workflow runners considered so far in
	// this pass, so they can be capped at maxSupervisors.
	supervisors := 0

tryrun:
	for i, ctr := range sorted {
		ctr, it := ctr.Container, ctr.InstanceType
		logger := sch.logger.WithFields(logrus.Fields{
			"ContainerUUID": ctr.UUID,
			"InstanceType":  it.Name,
		})
		if isSupervisor(ctr) {
			supervisors += 1
			if sch.maxSupervisors > 0 && supervisors > sch.maxSupervisors {
				// Too many supervisors already; skip
				// this one for now.
				continue
			}
		}
		if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
			// Already running, or no longer wanted.
			continue
		}
		switch ctr.State {
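		// Queued containers need to be locked before they can
		// run; Locked containers need an unallocated worker
		// (possibly a new instance) to start on.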
		case arvados.ContainerStateQueued:
			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
				logger.Tracef("not locking: already at maxConcurrency %d", sch.maxConcurrency)
				overquota = sorted[i:]
				break tryrun
			}
			trying++
			if unalloc[it] < 1 && sch.pool.AtQuota() {
				logger.Trace("not locking: AtQuota and no unalloc workers")
				overquota = sorted[i:]
				break tryrun
			}
			if sch.pool.KillContainer(ctr.UUID, "about to lock") {
				logger.Info("not locking: crunch-run process from previous attempt has not exited")
				continue
			}
			go sch.lockContainer(logger, ctr.UUID)
			unalloc[it]--
		case arvados.ContainerStateLocked:
			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
				logger.Debugf("not starting: already at maxConcurrency %d", sch.maxConcurrency)
				overquota = sorted[i:]
				break tryrun
			}
			trying++
			if unalloc[it] > 0 {
				unalloc[it]--
			} else if sch.pool.AtQuota() {
				// Don't let lower-priority containers
				// starve this one by keeping idle
				// workers alive on different instance
				// types.
				logger.Trace("overquota")
				overquota = sorted[i:]
				break tryrun
			} else if sch.pool.Create(it) {
				// Success. (Note pool.Create works
				// asynchronously and does its own
				// logging about the eventual outcome,
				// so we don't need to.)
				logger.Info("creating new instance")
			} else {
				// Failed despite not being at quota,
				// e.g., cloud ops throttled. TODO:
				// avoid getting starved here if
				// instances of a specific type always
				// fail.
				logger.Trace("pool declined to create new instance")
				continue
			}

			if dontstart[it] {
				// We already tried & failed to start
				// a higher-priority container on the
				// same instance type. Don't let this
				// one sneak in ahead of it.
			} else if sch.pool.KillContainer(ctr.UUID, "about to start") {
				logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
			} else if sch.pool.StartContainer(it, ctr) {
				logger.Trace("StartContainer => true")
			} else {
				logger.Trace("StartContainer => false")
				containerAllocatedWorkerBootingCount += 1
				dontstart[it] = true
			}
		}
	}

	sch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))
	sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota)))

	if len(overquota) > 0 {
		// Unlock any containers that are unmappable while
		// we're at quota (but if they have already been
		// scheduled and they're loading docker images etc.,
		// let them run).
		for _, ctr := range overquota {
			ctr := ctr.Container
			_, toolate := running[ctr.UUID]
			if ctr.State == arvados.ContainerStateLocked && !toolate {
				logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
				logger.Debug("unlock because pool capacity is used by higher priority containers")
				err := sch.queue.Unlock(ctr.UUID)
				if err != nil {
					logger.WithError(err).Warn("error unlocking")
				}
			}
		}
		// Shut down idle workers that didn't get any
		// containers mapped onto them before we hit quota.
		for it, n := range unalloc {
			if n < 1 {
				continue
			}
			sch.pool.Shutdown(it)
		}
	}
}

// lockContainer locks the given container. Should be called in a new
// goroutine.
func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
	if !sch.uuidLock(uuid, "lock") {
		return
	}
	defer sch.uuidUnlock(uuid)
	if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
		// This happens if the container has been cancelled or
		// locked since runQueue called sch.queue.Entries(),
		// possibly by a lockContainer() call from a previous
		// runQueue iteration. In any case, we will respond
		// appropriately on the next runQueue iteration, which
		// will have already been triggered by the queue
		// update notification.
		logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
		return
	}
	err := sch.queue.Lock(uuid)
	if err != nil {
		logger.WithError(err).Warn("error locking container")
		return
	}
	logger.Debug("lock succeeded")
	ctr, ok := sch.queue.Get(uuid)
	if !ok {
		logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
	} else if ctr.State != arvados.ContainerStateLocked {
		logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
	}
}

// uuidLock acquires a non-blocking lock for the specified UUID,
// returning true if successful. The op argument is used only for
// debug logs.
//
// If the lock is not available, uuidLock arranges to wake up the
// scheduler after a short delay, so it can retry whatever operation
// is trying to get the lock (if that operation is still worth doing).
//
// This mechanism helps avoid spamming the controller/database with
// concurrent updates for any single container, even when the
// scheduler loop is running frequently.
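//
// A minimal usage sketch (mirroring lockContainer above):
//
//	if !sch.uuidLock(uuid, "lock") {
//		return // another operation on this container is in flight
//	}
//	defer sch.uuidUnlock(uuid)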
func (sch *Scheduler) uuidLock(uuid, op string) bool {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	logger := sch.logger.WithFields(logrus.Fields{
		"ContainerUUID": uuid,
		"Op":            op,
	})
	if op, locked := sch.uuidOp[uuid]; locked {
		logger.Debugf("uuidLock not available, Op=%s in progress", op)
		// Make sure the scheduler loop wakes up to retry.
		sch.wakeup.Reset(time.Second / 4)
		return false
	}
	logger.Debug("uuidLock acquired")
	sch.uuidOp[uuid] = op
	return true
}

func (sch *Scheduler) uuidUnlock(uuid string) {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	delete(sch.uuidOp, uuid)
}