20457: Include delayed supervisor containers in overquota metric.
lib/dispatchcloud/scheduler/run_queue.go
// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package scheduler

import (
	"sort"
	"time"

	"git.arvados.org/arvados.git/lib/dispatchcloud/container"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"github.com/sirupsen/logrus"
)

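// quietAfter503 is how long we wait after the most recent HTTP 503
// from the API server before allowing maxConcurrency to rise again.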
var quietAfter503 = time.Minute

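// runQueue makes one scheduling pass over the container queue in
// priority order: it locks queued containers, starts locked
// containers on available or newly created workers, and unlocks
// containers that cannot be scheduled yet because of the dynamic
// maxConcurrency limit, the pool quota, or the supervisor limit.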
func (sch *Scheduler) runQueue() {
	unsorted, _ := sch.queue.Entries()
	sorted := make([]container.QueueEnt, 0, len(unsorted))
	for _, ent := range unsorted {
		sorted = append(sorted, ent)
	}
	sort.Slice(sorted, func(i, j int) bool {
		if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
			return pi > pj
		} else {
			// When containers have identical priority,
			// start them in the order we first noticed
			// them. This avoids extra lock/unlock cycles
			// when we unlock the containers that don't
			// fit in the available pool.
			return sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)
		}
	})

	running := sch.pool.Running()
	unalloc := sch.pool.Unallocated()

	if t := sch.client.Last503(); t.After(sch.last503time) {
		// API has sent an HTTP 503 response since last time
		// we checked. Use current #containers - 1 as
		// maxConcurrency, i.e., try to stay just below the
		// level where we see 503s.
		sch.last503time = t
		if newlimit := len(running) - 1; newlimit < 1 {
			sch.maxConcurrency = 1
		} else {
			sch.maxConcurrency = newlimit
		}
	} else if sch.maxConcurrency > 0 && time.Since(sch.last503time) > quietAfter503 {
		// If we haven't seen any 503 errors lately, raise
		// limit to ~10% beyond the current workload.
		//
		// As we use the added 10% to schedule more
		// containers, len(running) will increase and we'll
		// push the limit up further. Soon enough,
		// maxConcurrency will get high enough to schedule the
		// entire queue, hit pool quota, or get 503s again.
		max := len(running)*11/10 + 1
		if sch.maxConcurrency < max {
			sch.maxConcurrency = max
		}
	}
	if sch.last503time.IsZero() {
		sch.mLast503Time.Set(0)
	} else {
		sch.mLast503Time.Set(float64(sch.last503time.Unix()))
	}
	sch.mMaxContainerConcurrency.Set(float64(sch.maxConcurrency))

	sch.logger.WithFields(logrus.Fields{
		"Containers":     len(sorted),
		"Processes":      len(running),
		"maxConcurrency": sch.maxConcurrency,
	}).Debug("runQueue")

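	// dontstart marks instance types on which a higher-priority
	// container has already failed to start during this pass, so
	// lower-priority containers don't jump ahead of it on the
	// same instance type.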
	dontstart := map[arvados.InstanceType]bool{}
	var overquota []container.QueueEnt    // entries that are unmappable because of worker pool quota
	var overmaxsuper []container.QueueEnt // unmappable because the max-supervisors limit is reached (these are not included in overquota)
	var containerAllocatedWorkerBootingCount int

	// trying is #containers running + #containers we're trying to
	// start. We stop trying to start more containers if this
	// reaches the dynamic maxConcurrency limit.
	trying := len(running)

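	// supervisors counts the supervisor containers seen so far
	// in priority order, so we stop scheduling additional
	// supervisors once sch.maxSupervisors is reached.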
	supervisors := 0

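	// tryrun labels the scheduling loop so we can abandon the
	// whole pass (break tryrun) as soon as we reach the
	// maxConcurrency limit or the pool quota.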
tryrun:
	for i, ent := range sorted {
		ctr, it := ent.Container, ent.InstanceType
		logger := sch.logger.WithFields(logrus.Fields{
			"ContainerUUID": ctr.UUID,
			"InstanceType":  it.Name,
		})
		if ctr.SchedulingParameters.Supervisor {
			supervisors += 1
			if sch.maxSupervisors > 0 && supervisors > sch.maxSupervisors {
				overmaxsuper = append(overmaxsuper, sorted[i])
				continue
			}
		}
		if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
			continue
		}
		switch ctr.State {
		case arvados.ContainerStateQueued:
			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
				logger.Tracef("not locking: already at maxConcurrency %d", sch.maxConcurrency)
				overquota = sorted[i:]
				break tryrun
			}
			trying++
			if unalloc[it] < 1 && sch.pool.AtQuota() {
				logger.Trace("not locking: AtQuota and no unalloc workers")
				overquota = sorted[i:]
				break tryrun
			}
			if sch.pool.KillContainer(ctr.UUID, "about to lock") {
				logger.Info("not locking: crunch-run process from previous attempt has not exited")
				continue
			}
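			// Lock the container in the background and
			// tentatively claim one of the unallocated
			// workers of this instance type for it.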
			go sch.lockContainer(logger, ctr.UUID)
			unalloc[it]--
		case arvados.ContainerStateLocked:
			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
				logger.Debugf("not starting: already at maxConcurrency %d", sch.maxConcurrency)
				overquota = sorted[i:]
				break tryrun
			}
			trying++
			if unalloc[it] > 0 {
				unalloc[it]--
			} else if sch.pool.AtQuota() {
				// Don't let lower-priority containers
				// starve this one by keeping idle
				// workers alive on different instance
				// types.
				logger.Trace("overquota")
				overquota = sorted[i:]
				break tryrun
			} else if sch.pool.Create(it) {
				// Success. (Note pool.Create works
				// asynchronously and does its own
				// logging about the eventual outcome,
				// so we don't need to.)
				logger.Info("creating new instance")
			} else {
				// Failed despite not being at quota,
				// e.g., cloud ops throttled.  TODO:
				// avoid getting starved here if
				// instances of a specific type always
				// fail.
				logger.Trace("pool declined to create new instance")
				continue
			}

			if dontstart[it] {
				// We already tried & failed to start
				// a higher-priority container on the
				// same instance type. Don't let this
				// one sneak in ahead of it.
			} else if sch.pool.KillContainer(ctr.UUID, "about to start") {
				logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
			} else if sch.pool.StartContainer(it, ctr) {
				logger.Trace("StartContainer => true")
				// Success.
			} else {
				logger.Trace("StartContainer => false")
				containerAllocatedWorkerBootingCount += 1
				dontstart[it] = true
			}
		}
	}

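	// Update metrics: containers allocated to a worker that has
	// not started them yet, and containers that could not be
	// scheduled because of quota or the supervisor limit.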
	sch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))
	sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota) + len(overmaxsuper)))

	if len(overquota)+len(overmaxsuper) > 0 {
		// Unlock any containers that are unmappable while
		// we're at quota (but if they have already been
		// scheduled and they're loading docker images etc.,
		// let them run).
		for _, ctr := range append(overmaxsuper, overquota...) {
			ctr := ctr.Container
			_, toolate := running[ctr.UUID]
			if ctr.State == arvados.ContainerStateLocked && !toolate {
				logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
				logger.Info("unlock because pool capacity is used by higher priority containers")
				err := sch.queue.Unlock(ctr.UUID)
				if err != nil {
					logger.WithError(err).Warn("error unlocking")
				}
			}
		}
		// Shut down idle workers that didn't get any
		// containers mapped onto them before we hit quota.
		for it, n := range unalloc {
			if n < 1 {
				continue
			}
			sch.pool.Shutdown(it)
		}
	}
}

// Lock the given container. Should be called in a new goroutine.
func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
	if !sch.uuidLock(uuid, "lock") {
		return
	}
	defer sch.uuidUnlock(uuid)
	if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
		// This happens if the container has been cancelled or
		// locked since runQueue called sch.queue.Entries(),
		// possibly by a lockContainer() call from a previous
		// runQueue iteration. In any case, we will respond
		// appropriately on the next runQueue iteration, which
		// will have already been triggered by the queue
		// update.
		logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
		return
	}
	err := sch.queue.Lock(uuid)
	if err != nil {
		logger.WithError(err).Warn("error locking container")
		return
	}
	logger.Debug("lock succeeded")
	ctr, ok := sch.queue.Get(uuid)
	if !ok {
		logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
	} else if ctr.State != arvados.ContainerStateLocked {
		logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
	}
}

// Acquire a non-blocking lock for specified UUID, returning true if
// successful.  The op argument is used only for debug logs.
//
// If the lock is not available, uuidLock arranges to wake up the
// scheduler after a short delay, so it can retry whatever operation
// is trying to get the lock (if that operation is still worth doing).
//
// This mechanism helps avoid spamming the controller/database with
// concurrent updates for any single container, even when the
// scheduler loop is running frequently.
func (sch *Scheduler) uuidLock(uuid, op string) bool {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	logger := sch.logger.WithFields(logrus.Fields{
		"ContainerUUID": uuid,
		"Op":            op,
	})
	if op, locked := sch.uuidOp[uuid]; locked {
		logger.Debugf("uuidLock not available, Op=%s in progress", op)
		// Make sure the scheduler loop wakes up to retry.
		sch.wakeup.Reset(time.Second / 4)
		return false
	}
	logger.Debug("uuidLock acquired")
	sch.uuidOp[uuid] = op
	return true
}

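// uuidUnlock releases the per-container lock acquired by uuidLock.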
func (sch *Scheduler) uuidUnlock(uuid string) {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	delete(sch.uuidOp, uuid)
}