import (
"sort"
+ "time"
- "git.curoverse.com/arvados.git/lib/cloud"
- "git.curoverse.com/arvados.git/lib/dispatchcloud/container"
- "git.curoverse.com/arvados.git/sdk/go/arvados"
- "github.com/Sirupsen/logrus"
+ "git.arvados.org/arvados.git/lib/dispatchcloud/container"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "github.com/sirupsen/logrus"
)
func (sch *Scheduler) runQueue() {
sorted = append(sorted, ent)
}
sort.Slice(sorted, func(i, j int) bool {
- return sorted[i].Container.Priority > sorted[j].Container.Priority
+ if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
+ return pi > pj
+ } else {
+ // When containers have identical priority,
+ // start them in the order we first noticed
+ // them. This avoids extra lock/unlock cycles
+ // when we unlock the containers that don't
+ // fit in the available pool.
+ return sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)
+ }
})
running := sch.pool.Running()
dontstart := map[arvados.InstanceType]bool{}
var overquota []container.QueueEnt // entries that are unmappable because of worker pool quota
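+ // Tally of containers that have been allocated to a worker
+ // that is still booting; reported below via the
+ // mContainersAllocatedNotStarted gauge.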
+ var containerAllocatedWorkerBootingCount int
tryrun:
for i, ctr := range sorted {
overquota = sorted[i:]
break tryrun
}
- sch.bgLock(logger, ctr.UUID)
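+ // Tell the worker pool to kill any
+ // crunch-run process left over from a
+ // previous attempt; if one is still
+ // running, don't lock the container
+ // yet.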
+ if sch.pool.KillContainer(ctr.UUID, "about to lock") {
+ logger.Info("not locking: crunch-run process from previous attempt has not exited")
+ continue
+ }
+ go sch.lockContainer(logger, ctr.UUID)
unalloc[it]--
case arvados.ContainerStateLocked:
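+ // Try to reserve an unallocated worker
+ // of the right instance type, or create
+ // a new instance if we're not at quota.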
if unalloc[it] > 0 {
unalloc[it]--
} else if sch.pool.AtQuota() {
- logger.Debug("not starting: AtQuota and no unalloc workers")
+ // Don't let lower-priority containers
+ // starve this one by keeping idle
+ // workers alive on different instance
+ // types.
+ logger.Trace("overquota")
overquota = sorted[i:]
break tryrun
- } else {
+ } else if sch.pool.Create(it) {
+ // Success. (Note pool.Create works
+ // asynchronously and does its own
+ // logging about the eventual outcome,
+ // so we don't need to.)
logger.Info("creating new instance")
- err := sch.pool.Create(it)
- if err != nil {
- if _, ok := err.(cloud.QuotaError); !ok {
- logger.WithError(err).Warn("error creating worker")
- }
- sch.queue.Unlock(ctr.UUID)
- // Don't let lower-priority
- // containers starve this one
- // by using keeping idle
- // workers alive on different
- // instance types. TODO:
- // avoid getting starved here
- // if instances of a specific
- // type always fail.
- overquota = sorted[i:]
- break tryrun
- }
+ } else {
+ // Failed despite not being at quota,
+ // e.g., cloud ops throttled. TODO:
+ // avoid getting starved here if
+ // instances of a specific type always
+ // fail.
+ logger.Trace("pool declined to create new instance")
+ continue
}
if dontstart[it] {
// We already tried and failed to start
// a higher-priority container on the
// same instance type. Don't let this
// one sneak in ahead of it.
+ } else if sch.pool.KillContainer(ctr.UUID, "about to start") {
+ logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
} else if sch.pool.StartContainer(it, ctr) {
// Success.
} else {
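+ // The container has been allocated to
+ // a worker, but the worker can't start
+ // it yet, presumably because it is
+ // still booting. Don't start lower-
+ // priority containers of the same type
+ // ahead of it.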
+ containerAllocatedWorkerBootingCount++
dontstart[it] = true
}
}
}
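+ // Publish the tallies gathered above as metrics.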
+ sch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))
+ sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota)))
+
if len(overquota) > 0 {
// Unlock any containers that are unmappable while
// we're at quota.
}
}
-// Start an API call to lock the given container, and return
-// immediately while waiting for the response in a new goroutine. Do
-// nothing if a lock request is already in progress for this
-// container.
-func (sch *Scheduler) bgLock(logger logrus.FieldLogger, uuid string) {
- logger.Debug("locking")
- sch.mtx.Lock()
- defer sch.mtx.Unlock()
- if sch.locking[uuid] {
- logger.Debug("locking in progress, doing nothing")
+// Lock the given container. Should be called in a new goroutine.
+func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
+ if !sch.uuidLock(uuid, "lock") {
return
}
+ defer sch.uuidUnlock(uuid)
if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
// This happens if the container has been cancelled or
// locked since runQueue called sch.queue.Entries(),
- // possibly by a bgLock() call from a previous
+ // possibly by a lockContainer() call from a previous
// runQueue iteration. In any case, we will respond
// appropriately on the next runQueue iteration, which
// will have already been triggered by the queue
// update that changed the container's state.
logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
return
}
- sch.locking[uuid] = true
- go func() {
- defer func() {
- sch.mtx.Lock()
- defer sch.mtx.Unlock()
- delete(sch.locking, uuid)
- }()
- err := sch.queue.Lock(uuid)
- if err != nil {
- logger.WithError(err).Warn("error locking container")
- return
- }
- logger.Debug("lock succeeded")
- ctr, ok := sch.queue.Get(uuid)
- if !ok {
- logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
- } else if ctr.State != arvados.ContainerStateLocked {
- logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
- }
- }()
+ err := sch.queue.Lock(uuid)
+ if err != nil {
+ logger.WithError(err).Warn("error locking container")
+ return
+ }
+ logger.Debug("lock succeeded")
+ ctr, ok := sch.queue.Get(uuid)
+ if !ok {
+ logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
+ } else if ctr.State != arvados.ContainerStateLocked {
+ logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
+ }
+}
+
+// Acquire a non-blocking lock for the specified UUID, returning
+// true if successful. The op argument is used only for debug logs.
+//
+// If the lock is not available, uuidLock arranges to wake up the
+// scheduler after a short delay, so it can retry whatever operation
+// is trying to get the lock (if that operation is still worth doing).
+//
+// This mechanism helps avoid spamming the controller/database with
+// concurrent updates for any single container, even when the
+// scheduler loop is running frequently.
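+//
+// Illustrative call pattern, mirroring lockContainer above
+// (doSomething is a hypothetical operation, not part of this
+// change):
+//
+//	go func() {
+//		if !sch.uuidLock(uuid, "dosomething") {
+//			return
+//		}
+//		defer sch.uuidUnlock(uuid)
+//		doSomething(uuid)
+//	}()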
+func (sch *Scheduler) uuidLock(uuid, op string) bool {
+ sch.mtx.Lock()
+ defer sch.mtx.Unlock()
+ logger := sch.logger.WithFields(logrus.Fields{
+ "ContainerUUID": uuid,
+ "Op": op,
+ })
+ if op, locked := sch.uuidOp[uuid]; locked {
+ logger.Debugf("uuidLock not available, Op=%s in progress", op)
+ // Make sure the scheduler loop wakes up to retry.
+ sch.wakeup.Reset(time.Second / 4)
+ return false
+ }
+ logger.Debug("uuidLock acquired")
+ sch.uuidOp[uuid] = op
+ return true
+}
+
+func (sch *Scheduler) uuidUnlock(uuid string) {
+ sch.mtx.Lock()
+ defer sch.mtx.Unlock()
+ delete(sch.uuidOp, uuid)
}