lib/dispatchcloud/scheduler/run_queue.go
// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package scheduler

import (
	"sort"
	"time"

	"git.arvados.org/arvados.git/lib/dispatchcloud/container"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"github.com/sirupsen/logrus"
)

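// runQueue runs a single scheduling pass: it sorts the queued and
// locked containers by priority, starts as many as the worker pool
// can accommodate (creating new instances where needed), and unlocks
// any containers that cannot be scheduled once the cloud quota is
// reached.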
func (sch *Scheduler) runQueue() {
	unsorted, _ := sch.queue.Entries()
	sorted := make([]container.QueueEnt, 0, len(unsorted))
	for _, ent := range unsorted {
		sorted = append(sorted, ent)
	}
	sort.Slice(sorted, func(i, j int) bool {
		if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
			return pi > pj
		} else {
			// When containers have identical priority,
			// start them in the order we first noticed
			// them. This avoids extra lock/unlock cycles
			// when we unlock the containers that don't
			// fit in the available pool.
			return sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)
		}
	})

	running := sch.pool.Running()
	unalloc := sch.pool.Unallocated()

	sch.logger.WithFields(logrus.Fields{
		"Containers": len(sorted),
		"Processes":  len(running),
	}).Debug("runQueue")

	dontstart := map[arvados.InstanceType]bool{}  // instance types on which a higher-priority container already failed to start this pass
	var overquota []container.QueueEnt            // entries that are unmappable because of worker pool quota
	var containerAllocatedWorkerBootingCount int  // containers ready to run, waiting for their worker to finish booting

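// Walk the sorted queue in priority order. The tryrun label lets the
// pass be abandoned entirely (via "break tryrun") as soon as the
// pool reports it is at quota.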
tryrun:
	for i, ctr := range sorted {
		ctr, it := ctr.Container, ctr.InstanceType
		logger := sch.logger.WithFields(logrus.Fields{
			"ContainerUUID": ctr.UUID,
			"InstanceType":  it.Name,
		})
		if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
			continue
		}
		switch ctr.State {
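		// A Queued container must first be locked at the API
		// server; locking happens asynchronously in
		// lockContainer, and the container is started on a
		// later pass once it reaches the Locked state.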
		case arvados.ContainerStateQueued:
			if unalloc[it] < 1 && sch.pool.AtQuota() {
				logger.Debug("not locking: AtQuota and no unalloc workers")
				overquota = sorted[i:]
				break tryrun
			}
			if sch.pool.KillContainer(ctr.UUID, "about to lock") {
				logger.Info("not locking: crunch-run process from previous attempt has not exited")
				continue
			}
			go sch.lockContainer(logger, ctr.UUID)
			unalloc[it]--
		case arvados.ContainerStateLocked:
			if unalloc[it] > 0 {
				unalloc[it]--
			} else if sch.pool.AtQuota() {
				// Don't let lower-priority containers
				// starve this one by keeping idle
				// workers alive on different instance
				// types.
				logger.Trace("overquota")
				overquota = sorted[i:]
				break tryrun
			} else if sch.pool.Create(it) {
				// Success. (Note pool.Create works
				// asynchronously and does its own
				// logging about the eventual outcome,
				// so we don't need to.)
				logger.Info("creating new instance")
			} else {
				// Failed despite not being at quota,
				// e.g., cloud ops throttled.  TODO:
				// avoid getting starved here if
				// instances of a specific type always
				// fail.
				logger.Trace("pool declined to create new instance")
				continue
			}

			if dontstart[it] {
				// We already tried & failed to start
				// a higher-priority container on the
				// same instance type. Don't let this
				// one sneak in ahead of it.
			} else if sch.pool.KillContainer(ctr.UUID, "about to start") {
				logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
			} else if sch.pool.StartContainer(it, ctr) {
				// Success.
			} else {
				containerAllocatedWorkerBootingCount += 1
				dontstart[it] = true
			}
		}
	}

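	// Publish the counts gathered above via the scheduler's metrics.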
	sch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))
	sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota)))

	if len(overquota) > 0 {
		// Unlock any containers that are unmappable while
		// we're at quota.
		for _, ctr := range overquota {
			ctr := ctr.Container
			if ctr.State == arvados.ContainerStateLocked {
				logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
				logger.Debug("unlock because pool capacity is used by higher priority containers")
				err := sch.queue.Unlock(ctr.UUID)
				if err != nil {
					logger.WithError(err).Warn("error unlocking")
				}
			}
		}
		// Shut down idle workers that didn't get any
		// containers mapped onto them before we hit quota.
		for it, n := range unalloc {
			if n < 1 {
				continue
			}
			sch.pool.Shutdown(it)
		}
	}
}

// Lock the given container. Should be called in a new goroutine.
func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
	if !sch.uuidLock(uuid, "lock") {
		return
	}
	defer sch.uuidUnlock(uuid)
	if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
		// This happens if the container has been cancelled or
		// locked since runQueue called sch.queue.Entries(),
		// possibly by a lockContainer() call from a previous
		// runQueue iteration. In any case, we will respond
		// appropriately on the next runQueue iteration, which
		// will have already been triggered by the queue
		// update.
		logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
		return
	}
	err := sch.queue.Lock(uuid)
	if err != nil {
		logger.WithError(err).Warn("error locking container")
		return
	}
	logger.Debug("lock succeeded")
	ctr, ok := sch.queue.Get(uuid)
	if !ok {
		logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
	} else if ctr.State != arvados.ContainerStateLocked {
		logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
	}
}

// Acquire a non-blocking lock for specified UUID, returning true if
// successful.  The op argument is used only for debug logs.
//
// If the lock is not available, uuidLock arranges to wake up the
// scheduler after a short delay, so it can retry whatever operation
// is trying to get the lock (if that operation is still worth doing).
//
// This mechanism helps avoid spamming the controller/database with
// concurrent updates for any single container, even when the
// scheduler loop is running frequently.
func (sch *Scheduler) uuidLock(uuid, op string) bool {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	logger := sch.logger.WithFields(logrus.Fields{
		"ContainerUUID": uuid,
		"Op":            op,
	})
	if op, locked := sch.uuidOp[uuid]; locked {
		logger.Debugf("uuidLock not available, Op=%s in progress", op)
		// Make sure the scheduler loop wakes up to retry.
		sch.wakeup.Reset(time.Second / 4)
		return false
	}
	logger.Debug("uuidLock acquired")
	sch.uuidOp[uuid] = op
	return true
}

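// uuidUnlock releases the per-container lock acquired by uuidLock.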
func (sch *Scheduler) uuidUnlock(uuid string) {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	delete(sch.uuidOp, uuid)
}