arvados.git: lib/dispatchcloud/scheduler/run_queue.go
// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package scheduler

import (
        "sort"
        "time"

        "git.arvados.org/arvados.git/lib/dispatchcloud/container"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "github.com/sirupsen/logrus"
)

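// quietAfter503 is how long we wait after the most recent 503
// response from the API before we start raising maxConcurrency
// again (see runQueue).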
var quietAfter503 = time.Minute

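// runQueue examines the container queue and the worker pool, adjusts
// the dynamic concurrency limit based on recent 503 responses and
// quota/capacity signals, and then, in scheduling order, locks queued
// containers and starts locked ones (creating new instances where
// needed). Containers that cannot be accommodated because of quota or
// the supervisor limit are unlocked again.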
func (sch *Scheduler) runQueue() {
        running := sch.pool.Running()
        unalloc := sch.pool.Unallocated()

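        // Add up the pool's worker counts across all states to get
        // the total number of instances.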
        totalInstances := 0
        for _, n := range sch.pool.CountWorkers() {
                totalInstances += n
        }

        unsorted, _ := sch.queue.Entries()
        sorted := make([]container.QueueEnt, 0, len(unsorted))
        for _, ent := range unsorted {
                sorted = append(sorted, ent)
        }
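        // Sort the queue into scheduling order: containers that are
        // already running first, then ones we have already locked,
        // then by descending priority, and finally by the order we
        // first saw them.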
        sort.Slice(sorted, func(i, j int) bool {
                _, irunning := running[sorted[i].Container.UUID]
                _, jrunning := running[sorted[j].Container.UUID]
                if irunning != jrunning {
                        // Ensure the "tryrun" loop (see below) sees
                        // already-scheduled containers first, to
                        // ensure existing supervisor containers are
                        // properly counted before we decide whether
                        // we have room for new ones.
                        return irunning
                }
                ilocked := sorted[i].Container.State == arvados.ContainerStateLocked
                jlocked := sorted[j].Container.State == arvados.ContainerStateLocked
                if ilocked != jlocked {
                        // Give precedence to containers that we have
                        // already locked, even if higher-priority
                        // containers have since arrived in the
                        // queue. This avoids undesirable queue churn
                        // effects including extra lock/unlock cycles
                        // and bringing up new instances and quickly
                        // shutting them down to make room for
                        // different instance sizes.
                        return ilocked
                } else if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
                        return pi > pj
                } else {
                        // When containers have identical priority,
                        // start them in the order we first noticed
                        // them. This avoids extra lock/unlock cycles
                        // when we unlock the containers that don't
                        // fit in the available pool.
                        return sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)
                }
        })

        if t := sch.client.Last503(); t.After(sch.last503time) {
                // API has sent an HTTP 503 response since last time
                // we checked. Use current #containers - 1 as
                // maxConcurrency, i.e., try to stay just below the
                // level where we see 503s.
                sch.last503time = t
                if newlimit := len(running) - 1; newlimit < 1 {
                        sch.maxConcurrency = 1
                } else {
                        sch.maxConcurrency = newlimit
                }
        } else if sch.maxConcurrency > 0 && time.Since(sch.last503time) > quietAfter503 {
                // If we haven't seen any 503 errors lately, raise
                // limit to ~10% beyond the current workload.
                //
                // As we use the added 10% to schedule more
                // containers, len(running) will increase and we'll
                // push the limit up further. Soon enough,
                // maxConcurrency will get high enough to schedule the
                // entire queue, hit pool quota, or get 503s again.
                max := len(running)*11/10 + 1
                if sch.maxConcurrency < max {
                        sch.maxConcurrency = max
                }
        }
        if sch.last503time.IsZero() {
                sch.mLast503Time.Set(0)
        } else {
                sch.mLast503Time.Set(float64(sch.last503time.Unix()))
        }
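        // Regardless of the above, never let the concurrency limit
        // exceed the configured MaxInstances.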
        if sch.maxInstances > 0 && sch.maxConcurrency > sch.maxInstances {
                sch.maxConcurrency = sch.maxInstances
        }
        if sch.instancesWithinQuota > 0 && sch.instancesWithinQuota < totalInstances {
                // Evidently it is possible to run this many
                // instances, so raise our estimate.
                sch.instancesWithinQuota = totalInstances
        }
        if sch.pool.AtQuota() {
                // Consider current workload to be the maximum
                // allowed, for the sake of reporting metrics and
                // calculating max supervisors.
                //
                // Now that sch.maxConcurrency is set, we will only
                // raise it past len(running) by 10%.  This helps
                // avoid running an inappropriate number of
                // supervisors when we reach the cloud-imposed quota
                // (which may be based on # CPUs etc) long before the
                // configured MaxInstances.
                if sch.maxConcurrency == 0 || sch.maxConcurrency > totalInstances {
                        if totalInstances == 0 {
                                sch.maxConcurrency = 1
                        } else {
                                sch.maxConcurrency = totalInstances
                        }
                }
                sch.instancesWithinQuota = totalInstances
        } else if sch.instancesWithinQuota > 0 && sch.maxConcurrency > sch.instancesWithinQuota+1 {
                // Once we've hit a quota error and started tracking
                // instancesWithinQuota (i.e., it's not zero), we
                // avoid exceeding that known-working level by more
                // than 1.
                //
                // If we don't do this, we risk entering a pattern of
                // repeatedly locking several containers, hitting
                // quota again, and unlocking them again each time the
                // driver stops reporting AtQuota, which tends to use
                // up the max lock/unlock cycles on the next few
                // containers in the queue, and cause them to fail.
                sch.maxConcurrency = sch.instancesWithinQuota + 1
        }
        sch.mMaxContainerConcurrency.Set(float64(sch.maxConcurrency))

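        // Allow at most this many supervisor containers, based on the
        // configured supervisor fraction of maxConcurrency (but at
        // least 1 when both the fraction and the limit are nonzero).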
        maxSupervisors := int(float64(sch.maxConcurrency) * sch.supervisorFraction)
        if maxSupervisors < 1 && sch.supervisorFraction > 0 && sch.maxConcurrency > 0 {
                maxSupervisors = 1
        }

        sch.logger.WithFields(logrus.Fields{
                "Containers":     len(sorted),
                "Processes":      len(running),
                "maxConcurrency": sch.maxConcurrency,
        }).Debug("runQueue")

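        // dontstart marks instance types on which we should not start
        // any more containers during this invocation, either because a
        // start attempt already failed or because a newly created
        // instance is still booting.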
        dontstart := map[arvados.InstanceType]bool{}
        var atcapacity = map[string]bool{}    // ProviderTypes reported as AtCapacity during this runQueue() invocation
        var overquota []container.QueueEnt    // entries that are unmappable because of worker pool quota
        var overmaxsuper []container.QueueEnt // unmappable because max supervisors (these are not included in overquota)
        var containerAllocatedWorkerBootingCount int

        // trying is #containers running + #containers we're trying to
        // start. We stop trying to start more containers if this
        // reaches the dynamic maxConcurrency limit.
        trying := len(running)

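        // supervisors counts the supervisor containers encountered so
        // far in scheduling order, so we can stop scheduling them once
        // we reach maxSupervisors.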
        supervisors := 0

tryrun:
        for i, ent := range sorted {
                ctr, types := ent.Container, ent.InstanceTypes
                logger := sch.logger.WithFields(logrus.Fields{
                        "ContainerUUID": ctr.UUID,
                })
                if ctr.SchedulingParameters.Supervisor {
                        supervisors += 1
                        if maxSupervisors > 0 && supervisors > maxSupervisors {
                                overmaxsuper = append(overmaxsuper, sorted[i])
                                continue
                        }
                }
                if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
                        continue
                }
                // If we have unalloc instances of any of the eligible
                // instance types, unallocOK is true and unallocType
                // is the lowest-cost type.
                var unallocOK bool
                var unallocType arvados.InstanceType
                for _, it := range types {
                        if unalloc[it] > 0 {
                                unallocOK = true
                                unallocType = it
                                break
                        }
                }
                // If the pool is not reporting AtCapacity for any of
                // the eligible instance types, availableOK is true
                // and availableType is the lowest-cost type.
                var availableOK bool
                var availableType arvados.InstanceType
                for _, it := range types {
                        if atcapacity[it.ProviderType] {
                                continue
                        } else if sch.pool.AtCapacity(it) {
                                atcapacity[it.ProviderType] = true
                                continue
                        } else {
                                availableOK = true
                                availableType = it
                                break
                        }
                }
                switch ctr.State {
                case arvados.ContainerStateQueued:
                        if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
                                logger.Tracef("not locking: already at maxConcurrency %d", sch.maxConcurrency)
                                continue
                        }
                        trying++
                        if !unallocOK && sch.pool.AtQuota() {
                                logger.Trace("not locking: AtQuota and no unalloc workers")
                                overquota = sorted[i:]
                                break tryrun
                        }
                        if !unallocOK && !availableOK {
                                logger.Trace("not locking: AtCapacity and no unalloc workers")
                                continue
                        }
                        if sch.pool.KillContainer(ctr.UUID, "about to lock") {
                                logger.Info("not locking: crunch-run process from previous attempt has not exited")
                                continue
                        }
                        go sch.lockContainer(logger, ctr.UUID)
                        unalloc[unallocType]--
                case arvados.ContainerStateLocked:
                        if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
                                logger.Tracef("not starting: already at maxConcurrency %d", sch.maxConcurrency)
                                continue
                        }
                        trying++
                        if unallocOK {
                                // We have a suitable instance type,
                                // so mark it as allocated, and try to
                                // start the container.
                                unalloc[unallocType]--
                                logger = logger.WithField("InstanceType", unallocType)
                                if dontstart[unallocType] {
                                        // We already tried & failed to start
                                        // a higher-priority container on the
                                        // same instance type. Don't let this
                                        // one sneak in ahead of it.
                                } else if sch.pool.KillContainer(ctr.UUID, "about to start") {
                                        logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
                                } else if sch.pool.StartContainer(unallocType, ctr) {
                                        logger.Trace("StartContainer => true")
                                } else {
                                        logger.Trace("StartContainer => false")
                                        containerAllocatedWorkerBootingCount += 1
                                        dontstart[unallocType] = true
                                }
                                continue
                        }
                        if sch.pool.AtQuota() {
                                // Don't let lower-priority containers
                                // starve this one by keeping idle
                                // workers alive on different instance
                                // types.
                                logger.Trace("overquota")
                                overquota = sorted[i:]
                                break tryrun
                        }
                        if !availableOK {
                                // Continue trying lower-priority
                                // containers in case they can run on
                                // different instance types that are
                                // available.
                                //
                                // The local "atcapacity" cache helps
                                // when the pool's flag resets after
                                // we look at container A but before
                                // we look at lower-priority container
                                // B. In that case we want to run
                                // container A on the next call to
                                // runQueue(), rather than run
                                // container B now.
                                logger.Trace("all eligible types at capacity")
                                continue
                        }
                        logger = logger.WithField("InstanceType", availableType)
                        if !sch.pool.Create(availableType) {
                                // Failed despite not being at quota,
                                // e.g., cloud ops throttled.
                                logger.Trace("pool declined to create new instance")
                                continue
                        }
                        // Success. (Note pool.Create works
                        // asynchronously and does its own logging
                        // about the eventual outcome, so we don't
                        // need to.)
                        logger.Info("creating new instance")
                        // Don't bother trying to start the container
                        // yet -- obviously the instance will take
                        // some time to boot and become ready.
                        containerAllocatedWorkerBootingCount += 1
                        dontstart[availableType] = true
                }
        }

        sch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))
        sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota) + len(overmaxsuper)))

        if len(overquota)+len(overmaxsuper) > 0 {
                // Unlock any containers that are unmappable while
                // we're at quota (but if they have already been
                // scheduled and they're loading docker images etc.,
                // let them run).
                var unlock []container.QueueEnt
                unlock = append(unlock, overmaxsuper...)
                if totalInstances > 0 && len(overquota) > 1 {
                        // We don't unlock the next-in-line container
                        // when at quota.  This avoids a situation
                        // where our "at quota" state expires, we lock
                        // the next container and try to create an
                        // instance, the cloud provider still returns
                        // a quota error, we unlock the container, and
                        // we repeat this until the container reaches
                        // its limit of lock/unlock cycles.
                        unlock = append(unlock, overquota[1:]...)
                } else {
                        // However, if totalInstances is 0 and we're
                        // still getting quota errors, then the
                        // next-in-line container is evidently not
                        // possible to run, so we should let it
                        // exhaust its lock/unlock cycles and
                        // eventually cancel, to avoid starvation.
                        unlock = append(unlock, overquota...)
                }
                for _, ctr := range unlock {
                        ctr := ctr.Container
                        _, toolate := running[ctr.UUID]
                        if ctr.State == arvados.ContainerStateLocked && !toolate {
                                logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
                                logger.Info("unlock because pool capacity is used by higher priority containers")
                                err := sch.queue.Unlock(ctr.UUID)
                                if err != nil {
                                        logger.WithError(err).Warn("error unlocking")
                                }
                        }
                }
        }
        if len(overquota) > 0 {
                // Shut down idle workers that didn't get any
                // containers mapped onto them before we hit quota.
                for it, n := range unalloc {
                        if n < 1 {
                                continue
                        }
                        sch.pool.Shutdown(it)
                }
        }
}

// Lock the given container. Should be called in a new goroutine.
func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
        if !sch.uuidLock(uuid, "lock") {
                return
        }
        defer sch.uuidUnlock(uuid)
        if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
                // This happens if the container has been cancelled or
                // locked since runQueue called sch.queue.Entries(),
                // possibly by a lockContainer() call from a previous
                // runQueue iteration. In any case, we will respond
                // appropriately on the next runQueue iteration, which
                // will have already been triggered by the queue
                // update.
                logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
                return
        }
        err := sch.queue.Lock(uuid)
        if err != nil {
                logger.WithError(err).Warn("error locking container")
                return
        }
        logger.Debug("lock succeeded")
        ctr, ok := sch.queue.Get(uuid)
        if !ok {
                logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
        } else if ctr.State != arvados.ContainerStateLocked {
                logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
        }
}

// Acquire a non-blocking lock for specified UUID, returning true if
// successful.  The op argument is used only for debug logs.
//
// If the lock is not available, uuidLock arranges to wake up the
// scheduler after a short delay, so it can retry whatever operation
// is trying to get the lock (if that operation is still worth doing).
//
// This mechanism helps avoid spamming the controller/database with
// concurrent updates for any single container, even when the
// scheduler loop is running frequently.
func (sch *Scheduler) uuidLock(uuid, op string) bool {
        sch.mtx.Lock()
        defer sch.mtx.Unlock()
        logger := sch.logger.WithFields(logrus.Fields{
                "ContainerUUID": uuid,
                "Op":            op,
        })
        if op, locked := sch.uuidOp[uuid]; locked {
                logger.Debugf("uuidLock not available, Op=%s in progress", op)
                // Make sure the scheduler loop wakes up to retry.
                sch.wakeup.Reset(time.Second / 4)
                return false
        }
        logger.Debug("uuidLock acquired")
        sch.uuidOp[uuid] = op
        return true
}

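// uuidUnlock releases the per-container lock acquired by uuidLock.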
func (sch *Scheduler) uuidUnlock(uuid string) {
        sch.mtx.Lock()
        defer sch.mtx.Unlock()
        delete(sch.uuidOp, uuid)
}