lib/dispatchcloud/scheduler/run_queue.go (arvados.git)
// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package scheduler

import (
	"sort"
	"time"

	"git.arvados.org/arvados.git/lib/dispatchcloud/container"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"github.com/sirupsen/logrus"
)

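// runQueue examines the container queue in priority order and takes
// the next scheduling step for each entry: queued containers are
// locked when capacity is (or can be made) available, locked
// containers are started on unallocated workers or have new
// instances created for them, and containers that cannot be mapped
// because the worker pool is at quota are unlocked again.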
func (sch *Scheduler) runQueue() {
	unsorted, _ := sch.queue.Entries()
	sorted := make([]container.QueueEnt, 0, len(unsorted))
	for _, ent := range unsorted {
		sorted = append(sorted, ent)
	}
	sort.Slice(sorted, func(i, j int) bool {
		return sorted[i].Container.Priority > sorted[j].Container.Priority
	})

	running := sch.pool.Running()
	unalloc := sch.pool.Unallocated()

	sch.logger.WithFields(logrus.Fields{
		"Containers": len(sorted),
		"Processes":  len(running),
	}).Debug("runQueue")

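	// dontstart marks instance types on which a higher-priority
	// container could not be started during this pass, so that
	// lower-priority containers of the same type don't jump ahead
	// of it. containerAllocatedWorkerBootingCount feeds the
	// mContainersAllocatedNotStarted metric set below.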
	dontstart := map[arvados.InstanceType]bool{}
	var overquota []container.QueueEnt // entries that are unmappable because of worker pool quota
	var containerAllocatedWorkerBootingCount int

tryrun:
	for i, ctr := range sorted {
		ctr, it := ctr.Container, ctr.InstanceType
		logger := sch.logger.WithFields(logrus.Fields{
			"ContainerUUID": ctr.UUID,
			"InstanceType":  it.Name,
		})
		if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
			continue
		}
		switch ctr.State {
		case arvados.ContainerStateQueued:
			if unalloc[it] < 1 && sch.pool.AtQuota() {
				logger.Debug("not locking: AtQuota and no unalloc workers")
				overquota = sorted[i:]
				break tryrun
			}
			if sch.pool.KillContainer(ctr.UUID, "about to lock") {
				logger.Info("not locking: crunch-run process from previous attempt has not exited")
				continue
			}
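			// Lock the container in the background; assume the
			// lock will succeed, and reserve one unallocated
			// worker of this type so lower-priority entries
			// later in this pass don't claim it.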
			go sch.lockContainer(logger, ctr.UUID)
			unalloc[it]--
		case arvados.ContainerStateLocked:
			if unalloc[it] > 0 {
				unalloc[it]--
			} else if sch.pool.AtQuota() {
				// Don't let lower-priority containers
				// starve this one by keeping idle
				// workers alive on different instance
				// types.
				logger.Debug("overquota")
				overquota = sorted[i:]
				break tryrun
			} else if logger.Info("creating new instance"); sch.pool.Create(it) {
				// Success. (Note pool.Create works
				// asynchronously and does its own
				// logging, so we don't need to.)
			} else {
				// Failed despite not being at quota,
				// e.g., cloud ops throttled.  TODO:
				// avoid getting starved here if
				// instances of a specific type always
				// fail.
				continue
			}

			if dontstart[it] {
				// We already tried & failed to start
				// a higher-priority container on the
				// same instance type. Don't let this
				// one sneak in ahead of it.
			} else if sch.pool.KillContainer(ctr.UUID, "about to start") {
				logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
			} else if sch.pool.StartContainer(it, ctr) {
				// Success.
			} else {
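				// StartContainer failed, typically because the
				// worker for this instance type is still booting.
				// Count it toward the "allocated but not started"
				// metric and hold back lower-priority containers
				// of the same type.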
				containerAllocatedWorkerBootingCount += 1
				dontstart[it] = true
			}
		}
	}

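	// Update the "allocated but not started" and "not allocated
	// over quota" metrics for this pass.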
	sch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))
	sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota)))

	if len(overquota) > 0 {
		// Unlock any containers that are unmappable while
		// we're at quota.
		for _, ctr := range overquota {
			ctr := ctr.Container
			if ctr.State == arvados.ContainerStateLocked {
				logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
				logger.Debug("unlock because pool capacity is used by higher priority containers")
				err := sch.queue.Unlock(ctr.UUID)
				if err != nil {
					logger.WithError(err).Warn("error unlocking")
				}
			}
		}
		// Shut down idle workers that didn't get any
		// containers mapped onto them before we hit quota.
		for it, n := range unalloc {
			if n < 1 {
				continue
			}
			sch.pool.Shutdown(it)
		}
	}
}

// Lock the given container. Should be called in a new goroutine.
func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
	if !sch.uuidLock(uuid, "lock") {
		return
	}
	defer sch.uuidUnlock(uuid)
	if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
		// This happens if the container has been cancelled or
		// locked since runQueue called sch.queue.Entries(),
		// possibly by a lockContainer() call from a previous
		// runQueue iteration. In any case, we will respond
		// appropriately on the next runQueue iteration, which
		// will have already been triggered by the queue
		// update.
		logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
		return
	}
	err := sch.queue.Lock(uuid)
	if err != nil {
		logger.WithError(err).Warn("error locking container")
		return
	}
	logger.Debug("lock succeeded")
	ctr, ok := sch.queue.Get(uuid)
	if !ok {
		logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
	} else if ctr.State != arvados.ContainerStateLocked {
		logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
	}
}

// Acquire a non-blocking lock for the specified UUID, returning true
// if successful. The op argument is used only for debug logs.
//
// If the lock is not available, uuidLock arranges to wake up the
// scheduler after a short delay, so it can retry whatever operation
// is trying to get the lock (if that operation is still worth doing).
//
// This mechanism helps avoid spamming the controller/database with
// concurrent updates for any single container, even when the
// scheduler loop is running frequently.
func (sch *Scheduler) uuidLock(uuid, op string) bool {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	logger := sch.logger.WithFields(logrus.Fields{
		"ContainerUUID": uuid,
		"Op":            op,
	})
	if op, locked := sch.uuidOp[uuid]; locked {
		logger.Debugf("uuidLock not available, Op=%s in progress", op)
		// Make sure the scheduler loop wakes up to retry.
		sch.wakeup.Reset(time.Second / 4)
		return false
	}
	logger.Debug("uuidLock acquired")
	sch.uuidOp[uuid] = op
	return true
}

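// uuidUnlock releases the per-container lock acquired by uuidLock.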
func (sch *Scheduler) uuidUnlock(uuid string) {
	sch.mtx.Lock()
	defer sch.mtx.Unlock()
	delete(sch.uuidOp, uuid)
}