20182: Add option to limit the number of supervisor containers
[arvados.git] / lib/dispatchcloud/scheduler/run_queue.go
// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package scheduler

import (
	"sort"
	"time"

	"git.arvados.org/arvados.git/lib/dispatchcloud/container"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"github.com/sirupsen/logrus"
)

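// quietAfter503 is how long the scheduler waits after the most recent
// 503 response before it starts raising maxConcurrency again.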
var quietAfter503 = time.Minute

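// isSupervisor reports whether ctr is a supervisor (workflow runner)
// container: either its command is arvados-cwl-runner, or its
// scheduling parameters explicitly flag it as a supervisor.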
func isSupervisor(ctr arvados.Container) bool {
	return (len(ctr.Command) > 0 && ctr.Command[0] == "arvados-cwl-runner") || ctr.SchedulingParameters.Supervisor
}

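// runQueue schedules queued containers in priority order: it adjusts
// maxConcurrency based on recent 503 responses, locks and starts
// containers (creating new instances when permitted by quota), and,
// when over quota, unlocks lower-priority containers and shuts down
// idle workers that cannot be used.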
func (sch *Scheduler) runQueue() {
	unsorted, _ := sch.queue.Entries()
	sorted := make([]container.QueueEnt, 0, len(unsorted))
	for _, ent := range unsorted {
		sorted = append(sorted, ent)
	}
	sort.Slice(sorted, func(i, j int) bool {
		if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
			return pi > pj
		} else {
			// When containers have identical priority,
			// start them in the order we first noticed
			// them. This avoids extra lock/unlock cycles
			// when we unlock the containers that don't
			// fit in the available pool.
			return sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)
		}
	})

	running := sch.pool.Running()
	unalloc := sch.pool.Unallocated()

	if t := sch.client.Last503(); t.After(sch.last503time) {
		// API has sent an HTTP 503 response since last time
		// we checked. Use current #containers - 1 as
		// maxConcurrency, i.e., try to stay just below the
		// level where we see 503s.
		sch.last503time = t
		if newlimit := len(running) - 1; newlimit < 1 {
			sch.maxConcurrency = 1
		} else {
			sch.maxConcurrency = newlimit
		}
	} else if sch.maxConcurrency > 0 && time.Since(sch.last503time) > quietAfter503 {
		// If we haven't seen any 503 errors lately, raise
		// limit to ~10% beyond the current workload.
		//
		// As we use the added 10% to schedule more
		// containers, len(running) will increase and we'll
		// push the limit up further. Soon enough,
		// maxConcurrency will get high enough to schedule the
		// entire queue, hit pool quota, or get 503s again.
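		// For example, with 100 containers running, the limit
		// becomes 100*11/10+1 = 111 (unless it is already
		// higher).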
		max := len(running)*11/10 + 1
		if sch.maxConcurrency < max {
			sch.maxConcurrency = max
		}
	}
	if sch.last503time.IsZero() {
		sch.mLast503Time.Set(0)
	} else {
		sch.mLast503Time.Set(float64(sch.last503time.Unix()))
	}
	sch.mMaxContainerConcurrency.Set(float64(sch.maxConcurrency))

	sch.logger.WithFields(logrus.Fields{
		"Containers":     len(sorted),
		"Processes":      len(running),
		"maxConcurrency": sch.maxConcurrency,
	}).Debug("runQueue")

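	// dontstart marks instance types on which a higher-priority
	// container has already failed to start during this pass, so
	// that lower-priority containers don't jump ahead of it.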
	dontstart := map[arvados.InstanceType]bool{}
	var overquota []container.QueueEnt // entries that are unmappable because of worker pool quota
	var containerAllocatedWorkerBootingCount int

	// trying is #containers running + #containers we're trying to
	// start. We stop trying to start more containers if this
	// reaches the dynamic maxConcurrency limit.
	trying := len(running)

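	// supervisors counts supervisor containers encountered so far
	// in priority order (whether already running or not), so the
	// maxSupervisors limit can be enforced below.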
	supervisors := 0

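// The tryrun loop walks the sorted queue and breaks out as soon as a
// concurrency or quota limit means nothing further can be scheduled
// on this pass.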
tryrun:
	for i, ctr := range sorted {
		ctr, it := ctr.Container, ctr.InstanceType
		logger := sch.logger.WithFields(logrus.Fields{
			"ContainerUUID": ctr.UUID,
			"InstanceType":  it.Name,
		})
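		// Count supervisor containers, and skip this one if
		// the configured supervisor limit has already been
		// reached; it stays queued and will be reconsidered
		// on a later pass.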
		if isSupervisor(ctr) {
			supervisors += 1
			if sch.maxSupervisors > 0 && supervisors > sch.maxSupervisors {
				continue
			}
		}
		if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
			continue
		}
		switch ctr.State {
		case arvados.ContainerStateQueued:
			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
				logger.Tracef("not locking: already at maxConcurrency %d", sch.maxConcurrency)
				overquota = sorted[i:]
				break tryrun
			}
			trying++
			if unalloc[it] < 1 && sch.pool.AtQuota() {
				logger.Trace("not locking: AtQuota and no unalloc workers")
				overquota = sorted[i:]
				break tryrun
			}
			if sch.pool.KillContainer(ctr.UUID, "about to lock") {
				logger.Info("not locking: crunch-run process from previous attempt has not exited")
				continue
			}
			go sch.lockContainer(logger, ctr.UUID)
			unalloc[it]--
		case arvados.ContainerStateLocked:
			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
				logger.Debugf("not starting: already at maxConcurrency %d", sch.maxConcurrency)
				overquota = sorted[i:]
				break tryrun
			}
			trying++
			if unalloc[it] > 0 {
				unalloc[it]--
			} else if sch.pool.AtQuota() {
				// Don't let lower-priority containers
				// starve this one by keeping idle
				// workers alive on different instance
				// types.
				logger.Trace("overquota")
				overquota = sorted[i:]
				break tryrun
			} else if sch.pool.Create(it) {
				// Success. (Note pool.Create works
				// asynchronously and does its own
				// logging about the eventual outcome,
				// so we don't need to.)
				logger.Info("creating new instance")
			} else {
				// Failed despite not being at quota,
				// e.g., cloud ops throttled.  TODO:
				// avoid getting starved here if
				// instances of a specific type always
				// fail.
				logger.Trace("pool declined to create new instance")
				continue
			}

			if dontstart[it] {
				// We already tried & failed to start
				// a higher-priority container on the
				// same instance type. Don't let this
				// one sneak in ahead of it.
			} else if sch.pool.KillContainer(ctr.UUID, "about to start") {
				logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
			} else if sch.pool.StartContainer(it, ctr) {
				logger.Trace("StartContainer => true")
				// Success.
			} else {
				logger.Trace("StartContainer => false")
				containerAllocatedWorkerBootingCount += 1
				dontstart[it] = true
			}
		}
	}

	sch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))
	sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota)))

	if len(overquota) > 0 {
		// Unlock any containers that are unmappable while
		// we're at quota (but if they have already been
		// scheduled and they're loading docker images etc.,
		// let them run).
		for _, ctr := range overquota {
			ctr := ctr.Container
			_, toolate := running[ctr.UUID]
			if ctr.State == arvados.ContainerStateLocked && !toolate {
				logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
				logger.Debug("unlock because pool capacity is used by higher priority containers")
				err := sch.queue.Unlock(ctr.UUID)
				if err != nil {
					logger.WithError(err).Warn("error unlocking")
				}
			}
		}
		// Shut down idle workers that didn't get any
		// containers mapped onto them before we hit quota.
		for it, n := range unalloc {
			if n < 1 {
				continue
			}
			sch.pool.Shutdown(it)
		}
	}
}

// Lock the given container. Should be called in a new goroutine.
func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
	if !sch.uuidLock(uuid, "lock") {
		return
	}
	defer sch.uuidUnlock(uuid)
	if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
		// This happens if the container has been cancelled or
		// locked since runQueue called sch.queue.Entries(),
		// possibly by a lockContainer() call from a previous
		// runQueue iteration. In any case, we will respond
		// appropriately on the next runQueue iteration, which
		// will have already been triggered by the queue
		// update.
		logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
		return
	}
	err := sch.queue.Lock(uuid)
	if err != nil {
		logger.WithError(err).Warn("error locking container")
		return
	}
	logger.Debug("lock succeeded")
	ctr, ok := sch.queue.Get(uuid)
	if !ok {
		logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
	} else if ctr.State != arvados.ContainerStateLocked {
		logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
	}
}

// Acquire a non-blocking lock for the specified UUID, returning true
// if successful.  The op argument is used only for debug logs.
//
// If the lock is not available, uuidLock arranges to wake up the
// scheduler after a short delay, so it can retry whatever operation
// is trying to get the lock (if that operation is still worth doing).
//
// This mechanism helps avoid spamming the controller/database with
// concurrent updates for any single container, even when the
// scheduler loop is running frequently.
func (sch *Scheduler) uuidLock(uuid, op string) bool {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	logger := sch.logger.WithFields(logrus.Fields{
		"ContainerUUID": uuid,
		"Op":            op,
	})
	if op, locked := sch.uuidOp[uuid]; locked {
		logger.Debugf("uuidLock not available, Op=%s in progress", op)
		// Make sure the scheduler loop wakes up to retry.
		sch.wakeup.Reset(time.Second / 4)
		return false
	}
	logger.Debug("uuidLock acquired")
	sch.uuidOp[uuid] = op
	return true
}

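// uuidUnlock releases the per-container lock acquired by uuidLock.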
func (sch *Scheduler) uuidUnlock(uuid string) {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	delete(sch.uuidOp, uuid)
}