// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package scheduler

import (
	"sort"
	"time"

	"git.arvados.org/arvados.git/lib/dispatchcloud/container"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"github.com/sirupsen/logrus"
)

func (sch *Scheduler) runQueue() {
	unsorted, _ := sch.queue.Entries()
	sorted := make([]container.QueueEnt, 0, len(unsorted))
	for _, ent := range unsorted {
		sorted = append(sorted, ent)
	}
	sort.Slice(sorted, func(i, j int) bool {
		if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
			// Higher priority first.
			return pi > pj
		}
		// When containers have identical priority,
		// start them in the order we first noticed
		// them. This avoids extra lock/unlock cycles
		// when we unlock the containers that don't
		// fit in the available pool.
		return sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)
	})

	running := sch.pool.Running()
	unalloc := sch.pool.Unallocated()

	sch.logger.WithFields(logrus.Fields{
		"Containers": len(sorted),
		"Processes":  len(running),
	}).Debug("runQueue")

	dontstart := map[arvados.InstanceType]bool{}
	var overquota []container.QueueEnt // entries that are unmappable because of worker pool quota
	var containerAllocatedWorkerBootingCount int

tryrun:
	for i, ctr := range sorted {
		ctr, it := ctr.Container, ctr.InstanceType
		logger := sch.logger.WithFields(logrus.Fields{
			"ContainerUUID": ctr.UUID,
			"InstanceType":  it.Name,
		})
		if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
			// Already running, or no longer wanted (priority < 1).
			continue
		}
		switch ctr.State {
		case arvados.ContainerStateQueued:
			if unalloc[it] < 1 && sch.pool.AtQuota() {
				logger.Debug("not locking: AtQuota and no unalloc workers")
				overquota = sorted[i:]
				break tryrun
			}
			if sch.pool.KillContainer(ctr.UUID, "about to lock") {
				logger.Info("not locking: crunch-run process from previous attempt has not exited")
				continue
			}
			go sch.lockContainer(logger, ctr.UUID)
			unalloc[it]--
		case arvados.ContainerStateLocked:
			if unalloc[it] > 0 {
				// Reserve an existing idle worker.
				unalloc[it]--
			} else if sch.pool.AtQuota() {
				// Don't let lower-priority containers
				// starve this one by keeping idle
				// workers alive on different instance
				// types.
				logger.Trace("overquota")
				overquota = sorted[i:]
				break tryrun
			} else if sch.pool.Create(it) {
				// Success. (Note pool.Create works
				// asynchronously and does its own
				// logging about the eventual outcome,
				// so we don't need to.)
				logger.Info("creating new instance")
			} else {
				// Failed despite not being at quota,
				// e.g., cloud ops throttled. TODO:
				// avoid getting starved here if
				// instances of a specific type always
				// fail.
				logger.Trace("pool declined to create new instance")
				continue
			}

			if dontstart[it] {
				// We already tried & failed to start
				// a higher-priority container on the
				// same instance type. Don't let this
				// one sneak in ahead of it.
			} else if sch.pool.KillContainer(ctr.UUID, "about to start") {
				logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
			} else if sch.pool.StartContainer(it, ctr) {
				// Success.
			} else {
				// Couldn't start the container yet
				// (e.g., new worker is still booting).
				containerAllocatedWorkerBootingCount += 1
				dontstart[it] = true
			}
		}
	}

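	// Update metrics: containers allocated to a still-booting
	// worker, and containers skipped because the pool is at quota.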
	sch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))
	sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota)))

	if len(overquota) > 0 {
		// Unlock any containers that are unmappable while
		// we're at quota.
		for _, ctr := range overquota {
			ctr := ctr.Container
			if ctr.State == arvados.ContainerStateLocked {
				logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
				logger.Debug("unlock because pool capacity is used by higher priority containers")
				err := sch.queue.Unlock(ctr.UUID)
				if err != nil {
					logger.WithError(err).Warn("error unlocking")
				}
			}
		}
		// Shut down idle workers that didn't get any
		// containers mapped onto them before we hit quota.
		for it, n := range unalloc {
			if n < 1 {
				continue
			}
			sch.pool.Shutdown(it)
		}
	}
}

// Lock the given container. Should be called in a new goroutine.
func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
	if !sch.uuidLock(uuid, "lock") {
		// Another operation is in progress; the scheduler will retry.
		return
	}
	defer sch.uuidUnlock(uuid)
	if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
		// This happens if the container has been cancelled or
		// locked since runQueue called sch.queue.Entries(),
		// possibly by a lockContainer() call from a previous
		// runQueue iteration. In any case, we will respond
		// appropriately on the next runQueue iteration, which
		// will have already been triggered by the queue
		// update.
		logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
		return
	}
	err := sch.queue.Lock(uuid)
	if err != nil {
		logger.WithError(err).Warn("error locking container")
		return
	}
	logger.Debug("lock succeeded")
	ctr, ok := sch.queue.Get(uuid)
	if !ok {
		logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
	} else if ctr.State != arvados.ContainerStateLocked {
		logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
	}
}

// Acquire a non-blocking lock for specified UUID, returning true if
// successful. The op argument is used only for debug logs.
//
// If the lock is not available, uuidLock arranges to wake up the
// scheduler after a short delay, so it can retry whatever operation
// is trying to get the lock (if that operation is still worth doing).
//
// This mechanism helps avoid spamming the controller/database with
// concurrent updates for any single container, even when the
// scheduler loop is running frequently.
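//
// A sketch of the intended calling pattern (lockContainer above is a
// real caller):
//
//	if !sch.uuidLock(uuid, "lock") {
//		return // another op is in progress; scheduler will retry
//	}
//	defer sch.uuidUnlock(uuid)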
func (sch *Scheduler) uuidLock(uuid, op string) bool {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	logger := sch.logger.WithFields(logrus.Fields{
		"ContainerUUID": uuid,
		"Op":            op,
	})
	if op, locked := sch.uuidOp[uuid]; locked {
		logger.Debugf("uuidLock not available, Op=%s in progress", op)
		// Make sure the scheduler loop wakes up to retry.
		sch.wakeup.Reset(time.Second / 4)
		return false
	}
	logger.Debug("uuidLock acquired")
	sch.uuidOp[uuid] = op
	return true
}

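// uuidUnlock releases the per-container operation lock acquired by
// uuidLock.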
func (sch *Scheduler) uuidUnlock(uuid string) {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	delete(sch.uuidOp, uuid)
}