// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package scheduler

import (
	"sort"
	"time"

	"git.curoverse.com/arvados.git/lib/dispatchcloud/container"
	"git.curoverse.com/arvados.git/sdk/go/arvados"
	"github.com/sirupsen/logrus"
)
func (sch *Scheduler) runQueue() {
	unsorted, _ := sch.queue.Entries()
	sorted := make([]container.QueueEnt, 0, len(unsorted))
	for _, ent := range unsorted {
		sorted = append(sorted, ent)
	}
	sort.Slice(sorted, func(i, j int) bool {
		return sorted[i].Container.Priority > sorted[j].Container.Priority
	})

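	// Take a snapshot of the worker pool's state: which containers
	// are running, and how many unallocated workers of each
	// instance type are available.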
	running := sch.pool.Running()
	unalloc := sch.pool.Unallocated()

	sch.logger.WithFields(logrus.Fields{
		"Containers": len(sorted),
		"Processes":  len(running),
	}).Debug("runQueue")

	dontstart := map[arvados.InstanceType]bool{}
	var overquota []container.QueueEnt // entries that are unmappable because of worker pool quota
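
	// Walk the queue in priority order, handling each container
	// according to its state; the tryrun label lets the quota checks
	// below abandon the rest of the pass early.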
tryrun:
	for i, ctr := range sorted {
		ctr, it := ctr.Container, ctr.InstanceType
		logger := sch.logger.WithFields(logrus.Fields{
			"ContainerUUID": ctr.UUID,
			"InstanceType":  it.Name,
		})
		if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
			continue
		}
		switch ctr.State {
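		// A queued container must be locked before it can be
		// started. Kick off the lock in the background and
		// decrement the unallocated-worker count so later
		// (lower-priority) entries see the reduced capacity.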
		case arvados.ContainerStateQueued:
			if unalloc[it] < 1 && sch.pool.AtQuota() {
				logger.Debug("not locking: AtQuota and no unalloc workers")
				overquota = sorted[i:]
				break tryrun
			}
			go sch.lockContainer(logger, ctr.UUID)
			unalloc[it]--
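		// A locked container can be started as soon as a worker of
		// the right instance type is available; otherwise we try to
		// create one, unless the pool is at quota.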
		case arvados.ContainerStateLocked:
			if unalloc[it] > 0 {
				unalloc[it]--
			} else if sch.pool.AtQuota() {
				logger.Debug("not starting: AtQuota and no unalloc workers")
				overquota = sorted[i:]
				break tryrun
			} else {
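				// No idle worker of this type, and quota not
				// reached yet: ask the pool for a new instance.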
				logger.Info("creating new instance")
				if !sch.pool.Create(it) {
					// (Note pool.Create works
					// asynchronously and logs its
					// own failures, so we don't
					// need to log this as a
					// failure.)

					sch.queue.Unlock(ctr.UUID)
					// Don't let lower-priority
					// containers starve this one
					// by keeping idle workers
					// alive on different
					// instance types. TODO:
					// avoid getting starved here
					// if instances of a specific
					// type always fail.
					overquota = sorted[i:]
					break tryrun
				}
			}

			if dontstart[it] {
				// We already tried & failed to start
				// a higher-priority container on the
				// same instance type. Don't let this
				// one sneak in ahead of it.
			} else if sch.pool.StartContainer(it, ctr) {
				// Success.
			} else {
				dontstart[it] = true
			}
		}
	}

	if len(overquota) > 0 {
		// Unlock any containers that are unmappable while
		// we're at quota.
		for _, ctr := range overquota {
			ctr := ctr.Container
			if ctr.State == arvados.ContainerStateLocked {
				logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
				logger.Debug("unlock because pool capacity is used by higher priority containers")
				err := sch.queue.Unlock(ctr.UUID)
				if err != nil {
					logger.WithError(err).Warn("error unlocking")
				}
			}
		}
		// Shut down idle workers that didn't get any
		// containers mapped onto them before we hit quota.
		for it, n := range unalloc {
			if n < 1 {
				continue
			}
			sch.pool.Shutdown(it)
		}
	}
}

// Lock the given container. Should be called in a new goroutine.
func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
	if !sch.uuidLock(uuid, "lock") {
		return
	}
	defer sch.uuidUnlock(uuid)
	if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
		// This happens if the container has been cancelled or
		// locked since runQueue called sch.queue.Entries(),
		// possibly by a lockContainer() call from a previous
		// runQueue iteration. In any case, we will respond
		// appropriately on the next runQueue iteration, which
		// will have already been triggered by the queue
		// update.
		logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
		return
	}
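	// The container is still in the Queued state, so ask the queue
	// to lock it now.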
	err := sch.queue.Lock(uuid)
	if err != nil {
		logger.WithError(err).Warn("error locking container")
		return
	}
	logger.Debug("lock succeeded")
	ctr, ok := sch.queue.Get(uuid)
	if !ok {
		logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
	} else if ctr.State != arvados.ContainerStateLocked {
		logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
	}
}

// Acquire a non-blocking lock for specified UUID, returning true if
// successful. The op argument is used only for debug logs.
//
// If the lock is not available, uuidLock arranges to wake up the
// scheduler after a short delay, so it can retry whatever operation
// is trying to get the lock (if that operation is still worth doing).
//
// This mechanism helps avoid spamming the controller/database with
// concurrent updates for any single container, even when the
// scheduler loop is running frequently.
func (sch *Scheduler) uuidLock(uuid, op string) bool {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	logger := sch.logger.WithFields(logrus.Fields{
		"ContainerUUID": uuid,
		"Op":            op,
	})
	if op, locked := sch.uuidOp[uuid]; locked {
		logger.Debugf("uuidLock not available, Op=%s in progress", op)
		// Make sure the scheduler loop wakes up to retry.
		sch.wakeup.Reset(time.Second / 4)
		return false
	}
	logger.Debug("uuidLock acquired")
	sch.uuidOp[uuid] = op
	return true
}
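
// uuidUnlock releases the per-container lock acquired by uuidLock.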
func (sch *Scheduler) uuidUnlock(uuid string) {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	delete(sch.uuidOp, uuid)
}