// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package scheduler

import (
	"sort"
	"time"

	"git.arvados.org/arvados.git/lib/dispatchcloud/container"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"github.com/sirupsen/logrus"
)

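// quietAfter503 is how long we wait after a 503 response from the API
// server before allowing maxConcurrency to be raised again.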
var quietAfter503 = time.Minute

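// runQueue schedules containers from the queue onto idle or newly
// created workers, subject to the dynamically adjusted maxConcurrency
// limit.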
func (sch *Scheduler) runQueue() {
	running := sch.pool.Running()
	unalloc := sch.pool.Unallocated()

	unsorted, _ := sch.queue.Entries()
	sorted := make([]container.QueueEnt, 0, len(unsorted))
	for _, ent := range unsorted {
		sorted = append(sorted, ent)
	}
	sort.Slice(sorted, func(i, j int) bool {
		_, irunning := running[sorted[i].Container.UUID]
		_, jrunning := running[sorted[j].Container.UUID]
		if irunning != jrunning {
			// Make sure the "tryrun" loop (see below) sees
			// already-scheduled containers first, so that
			// existing supervisor containers are counted
			// properly before we decide whether we have
			// room for new ones.
			return irunning
		}
		ilocked := sorted[i].Container.State == arvados.ContainerStateLocked
		jlocked := sorted[j].Container.State == arvados.ContainerStateLocked
		if ilocked != jlocked {
			// Give precedence to containers that we have
			// already locked, even if higher-priority
			// containers have since arrived in the
			// queue. This avoids undesirable queue churn
			// effects including extra lock/unlock cycles
			// and bringing up new instances and quickly
			// shutting them down to make room for
			// different instance sizes.
			return ilocked
		} else if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
			return pi > pj
		} else {
			// When containers have identical priority,
			// start them in the order we first noticed
			// them. This avoids extra lock/unlock cycles
			// when we unlock the containers that don't
			// fit in the available pool.
			return sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)
		}
	})

	if t := sch.client.Last503(); t.After(sch.last503time) {
		// API has sent an HTTP 503 response since last time
		// we checked. Use current #containers - 1 as
		// maxConcurrency, i.e., try to stay just below the
		// level where we see 503s.
		sch.last503time = t
		if newlimit := len(running) - 1; newlimit < 1 {
			sch.maxConcurrency = 1
		} else {
			sch.maxConcurrency = newlimit
		}
	} else if sch.maxConcurrency > 0 && time.Since(sch.last503time) > quietAfter503 {
		// If we haven't seen any 503 errors lately, raise
		// limit to ~10% beyond the current workload.
		//
		// As we use the added 10% to schedule more
		// containers, len(running) will increase and we'll
		// push the limit up further. Soon enough,
		// maxConcurrency will get high enough to schedule the
		// entire queue, hit pool quota, or get 503s again.
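		// For example, with 40 containers running and no 503s
		// in the last minute, the limit is raised to at least
		// 40*11/10+1 = 45.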
		max := len(running)*11/10 + 1
		if sch.maxConcurrency < max {
			sch.maxConcurrency = max
		}
	}
	if sch.last503time.IsZero() {
		sch.mLast503Time.Set(0)
	} else {
		sch.mLast503Time.Set(float64(sch.last503time.Unix()))
	}
	if sch.maxInstances > 0 && sch.maxConcurrency > sch.maxInstances {
		sch.maxConcurrency = sch.maxInstances
	}
	instances := len(running) + len(unalloc)
	if sch.instancesWithinQuota > 0 && sch.instancesWithinQuota < instances {
		// Evidently it is possible to run this many
		// instances, so raise our estimate.
		sch.instancesWithinQuota = instances
	}
	if sch.pool.AtQuota() {
		// Consider current workload to be the maximum
		// allowed, for the sake of reporting metrics and
		// calculating max supervisors.
		//
		// Now that sch.maxConcurrency is set, we will only
		// raise it past len(running) by 10%.  This helps
		// avoid running an inappropriate number of
		// supervisors when we reach the cloud-imposed quota
		// (which may be based on # CPUs etc) long before the
		// configured MaxInstances.
		if sch.maxConcurrency == 0 || sch.maxConcurrency > instances {
			if instances == 0 {
				sch.maxConcurrency = 1
			} else {
				sch.maxConcurrency = instances
			}
		}
		sch.instancesWithinQuota = instances
	} else if sch.instancesWithinQuota > 0 && sch.maxConcurrency > sch.instancesWithinQuota+1 {
		// Once we've hit a quota error and started tracking
		// instancesWithinQuota (i.e., it's not zero), we
		// avoid exceeding that known-working level by more
		// than 1.
		//
		// If we don't do this, we risk entering a pattern of
		// repeatedly locking several containers, hitting
		// quota again, and unlocking them again each time the
		// driver stops reporting AtQuota, which tends to use
		// up the max lock/unlock cycles on the next few
		// containers in the queue, and cause them to fail.
		sch.maxConcurrency = sch.instancesWithinQuota + 1
	}
	sch.mMaxContainerConcurrency.Set(float64(sch.maxConcurrency))

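	// maxSupervisors is the number of supervisor containers we are
	// willing to run concurrently: supervisorFraction of
	// maxConcurrency, rounded down, but at least 1 when both the
	// fraction and the concurrency limit are nonzero.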
	maxSupervisors := int(float64(sch.maxConcurrency) * sch.supervisorFraction)
	if maxSupervisors < 1 && sch.supervisorFraction > 0 && sch.maxConcurrency > 0 {
		maxSupervisors = 1
	}

	sch.logger.WithFields(logrus.Fields{
		"Containers":     len(sorted),
		"Processes":      len(running),
		"maxConcurrency": sch.maxConcurrency,
	}).Debug("runQueue")

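	// dontstart marks instance types on which we have already tried
	// and failed to start a higher-priority container, so that
	// lower-priority containers don't sneak in ahead of it.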
	dontstart := map[arvados.InstanceType]bool{}
	var overquota []container.QueueEnt    // entries that are unmappable because of worker pool quota
	var overmaxsuper []container.QueueEnt // unmappable because max supervisors (these are not included in overquota)
	var containerAllocatedWorkerBootingCount int

	// trying is #containers running + #containers we're trying to
	// start. We stop trying to start more containers if this
	// reaches the dynamic maxConcurrency limit.
	trying := len(running)

	supervisors := 0

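	// The tryrun loop walks the sorted queue, locking queued
	// containers and starting locked ones, subject to the
	// maxConcurrency and supervisor limits. It stops early if the
	// pool is at quota and has no suitable unallocated workers.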
tryrun:
	for i, ent := range sorted {
		ctr, it := ent.Container, ent.InstanceType
		logger := sch.logger.WithFields(logrus.Fields{
			"ContainerUUID": ctr.UUID,
			"InstanceType":  it.Name,
		})
		if ctr.SchedulingParameters.Supervisor {
			supervisors += 1
			if maxSupervisors > 0 && supervisors > maxSupervisors {
				overmaxsuper = append(overmaxsuper, sorted[i])
				continue
			}
		}
		if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
			continue
		}
		switch ctr.State {
		case arvados.ContainerStateQueued:
			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
				logger.Tracef("not locking: already at maxConcurrency %d", sch.maxConcurrency)
				continue
			}
			trying++
			if unalloc[it] < 1 && sch.pool.AtQuota() {
				logger.Trace("not locking: AtQuota and no unalloc workers")
				overquota = sorted[i:]
				break tryrun
			}
			if sch.pool.KillContainer(ctr.UUID, "about to lock") {
				logger.Info("not locking: crunch-run process from previous attempt has not exited")
				continue
			}
			go sch.lockContainer(logger, ctr.UUID)
			unalloc[it]--
		case arvados.ContainerStateLocked:
			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
				logger.Tracef("not starting: already at maxConcurrency %d", sch.maxConcurrency)
				continue
			}
			trying++
			if unalloc[it] > 0 {
				unalloc[it]--
			} else if sch.pool.AtQuota() {
				// Don't let lower-priority containers
				// starve this one by keeping idle
				// workers alive on different instance
				// types.
				logger.Trace("overquota")
				overquota = sorted[i:]
				break tryrun
			} else if sch.pool.Create(it) {
				// Success. (Note pool.Create works
				// asynchronously and does its own
				// logging about the eventual outcome,
				// so we don't need to.)
				logger.Info("creating new instance")
			} else {
				// Failed despite not being at quota,
				// e.g., cloud ops throttled.  TODO:
				// avoid getting starved here if
				// instances of a specific type always
				// fail.
				logger.Trace("pool declined to create new instance")
				continue
			}

			if dontstart[it] {
				// We already tried & failed to start
				// a higher-priority container on the
				// same instance type. Don't let this
				// one sneak in ahead of it.
			} else if sch.pool.KillContainer(ctr.UUID, "about to start") {
				logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
			} else if sch.pool.StartContainer(it, ctr) {
				logger.Trace("StartContainer => true")
				// Success.
			} else {
				logger.Trace("StartContainer => false")
				containerAllocatedWorkerBootingCount += 1
				dontstart[it] = true
			}
		}
	}

	sch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))
	sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota) + len(overmaxsuper)))

	if len(overquota)+len(overmaxsuper) > 0 {
		// Unlock any containers that are unmappable while
		// we're at quota (but if they have already been
		// scheduled and they're loading docker images etc.,
		// let them run).
		for _, ctr := range append(overmaxsuper, overquota...) {
			ctr := ctr.Container
			_, toolate := running[ctr.UUID]
			if ctr.State == arvados.ContainerStateLocked && !toolate {
				logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
				logger.Info("unlock because pool capacity is used by higher priority containers")
				err := sch.queue.Unlock(ctr.UUID)
				if err != nil {
					logger.WithError(err).Warn("error unlocking")
				}
			}
		}
	}
	if len(overquota) > 0 {
		// Shut down idle workers that didn't get any
		// containers mapped onto them before we hit quota.
		for it, n := range unalloc {
			if n < 1 {
				continue
			}
			sch.pool.Shutdown(it)
		}
	}
}

// Lock the given container. Should be called in a new goroutine.
func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
	if !sch.uuidLock(uuid, "lock") {
		return
	}
	defer sch.uuidUnlock(uuid)
	if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
		// This happens if the container has been cancelled or
		// locked since runQueue called sch.queue.Entries(),
		// possibly by a lockContainer() call from a previous
		// runQueue iteration. In any case, we will respond
		// appropriately on the next runQueue iteration, which
		// will have already been triggered by the queue
		// update.
		logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
		return
	}
	err := sch.queue.Lock(uuid)
	if err != nil {
		logger.WithError(err).Warn("error locking container")
		return
	}
	logger.Debug("lock succeeded")
	ctr, ok := sch.queue.Get(uuid)
	if !ok {
		logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
	} else if ctr.State != arvados.ContainerStateLocked {
		logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
	}
}

// Acquire a non-blocking lock for specified UUID, returning true if
// successful.  The op argument is used only for debug logs.
//
// If the lock is not available, uuidLock arranges to wake up the
// scheduler after a short delay, so it can retry whatever operation
// is trying to get the lock (if that operation is still worth doing).
//
// This mechanism helps avoid spamming the controller/database with
// concurrent updates for any single container, even when the
// scheduler loop is running frequently.
func (sch *Scheduler) uuidLock(uuid, op string) bool {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	logger := sch.logger.WithFields(logrus.Fields{
		"ContainerUUID": uuid,
		"Op":            op,
	})
	if op, locked := sch.uuidOp[uuid]; locked {
		logger.Debugf("uuidLock not available, Op=%s in progress", op)
		// Make sure the scheduler loop wakes up to retry.
		sch.wakeup.Reset(time.Second / 4)
		return false
	}
	logger.Debug("uuidLock acquired")
	sch.uuidOp[uuid] = op
	return true
}

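// uuidUnlock releases the per-container operation lock acquired by
// uuidLock.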
func (sch *Scheduler) uuidUnlock(uuid string) {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	delete(sch.uuidOp, uuid)
}