X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/17479bd75a29c52470abe0049cb447e114eb39e9..10d70a1c08984a699ac3f6b893fe6d2141c5ad9e:/lib/dispatchcloud/scheduler/run_queue.go

diff --git a/lib/dispatchcloud/scheduler/run_queue.go b/lib/dispatchcloud/scheduler/run_queue.go
index 9fc1a16580..d102d2fd20 100644
--- a/lib/dispatchcloud/scheduler/run_queue.go
+++ b/lib/dispatchcloud/scheduler/run_queue.go
@@ -6,11 +6,11 @@ package scheduler
 
 import (
 	"sort"
+	"time"
 
-	"git.curoverse.com/arvados.git/lib/cloud"
 	"git.curoverse.com/arvados.git/lib/dispatchcloud/container"
 	"git.curoverse.com/arvados.git/sdk/go/arvados"
-	"github.com/Sirupsen/logrus"
+	"github.com/sirupsen/logrus"
 )
 
 func (sch *Scheduler) runQueue() {
@@ -34,6 +34,7 @@ func (sch *Scheduler) runQueue() {
 	dontstart := map[arvados.InstanceType]bool{}
 	var overquota []container.QueueEnt // entries that are unmappable because of worker pool quota
 
+tryrun:
 	for i, ctr := range sorted {
 		ctr, it := ctr.Container, ctr.InstanceType
 		logger := sch.logger.WithFields(logrus.Fields{
@@ -43,60 +44,55 @@ func (sch *Scheduler) runQueue() {
 		if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
 			continue
 		}
-		if ctr.State == arvados.ContainerStateQueued {
+		switch ctr.State {
+		case arvados.ContainerStateQueued:
 			if unalloc[it] < 1 && sch.pool.AtQuota() {
-				logger.Debugf("not locking: AtQuota and no unalloc workers")
+				logger.Debug("not locking: AtQuota and no unalloc workers")
 				overquota = sorted[i:]
-				break
+				break tryrun
 			}
-			logger.Debugf("locking")
-			err := sch.queue.Lock(ctr.UUID)
-			if err != nil {
-				logger.WithError(err).Warnf("lock error")
-				unalloc[it]++
-				continue
-			}
-			var ok bool
-			ctr, ok = sch.queue.Get(ctr.UUID)
-			if !ok {
-				logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
-				continue
-			}
-			if ctr.State != arvados.ContainerStateLocked {
-				logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
-			}
-		}
-		if ctr.State != arvados.ContainerStateLocked {
-			continue
-		}
-		if unalloc[it] < 1 {
-			logger.Info("creating new instance")
-			err := sch.pool.Create(it)
-			if err != nil {
-				if _, ok := err.(cloud.QuotaError); !ok {
-					logger.WithError(err).Warn("error creating worker")
-				}
-				sch.queue.Unlock(ctr.UUID)
-				// Don't let lower-priority containers
-				// starve this one by using keeping
-				// idle workers alive on different
-				// instance types. TODO: avoid
-				// getting starved here if instances
-				// of a specific type always fail.
+			go sch.lockContainer(logger, ctr.UUID)
+			unalloc[it]--
+		case arvados.ContainerStateLocked:
+			if unalloc[it] > 0 {
+				unalloc[it]--
+			} else if sch.pool.AtQuota() {
+				logger.Debug("not starting: AtQuota and no unalloc workers")
 				overquota = sorted[i:]
-				break
+				break tryrun
+			} else {
+				logger.Info("creating new instance")
+				if !sch.pool.Create(it) {
+					// (Note pool.Create works
+					// asynchronously and logs its
+					// own failures, so we don't
+					// need to log this as a
+					// failure.)
+
+					sch.queue.Unlock(ctr.UUID)
+					// Don't let lower-priority
+					// containers starve this one
+					// by keeping idle workers
+					// alive on different instance
+					// types. TODO: avoid getting
+					// starved here if instances
+					// of a specific type always
+					// fail.
+					overquota = sorted[i:]
+					break tryrun
+				}
+			}
+
+			if dontstart[it] {
+				// We already tried & failed to start
+				// a higher-priority container on the
+				// same instance type. Don't let this
+				// one sneak in ahead of it.
+			} else if sch.pool.StartContainer(it, ctr) {
+				// Success.
+			} else {
+				dontstart[it] = true
 			}
-			unalloc[it]++
-		}
-		if dontstart[it] {
-			// We already tried & failed to start a
-			// higher-priority container on the same
-			// instance type. Don't let this one sneak in
-			// ahead of it.
-		} else if sch.pool.StartContainer(it, ctr) {
-			unalloc[it]--
-		} else {
-			dontstart[it] = true
 		}
 	}
 
@@ -124,3 +120,68 @@ func (sch *Scheduler) runQueue() {
 		}
 	}
 }
+
+// Lock the given container. Should be called in a new goroutine.
+func (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {
+	if !sch.uuidLock(uuid, "lock") {
+		return
+	}
+	defer sch.uuidUnlock(uuid)
+	if ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {
+		// This happens if the container has been cancelled or
+		// locked since runQueue called sch.queue.Entries(),
+		// possibly by a lockContainer() call from a previous
+		// runQueue iteration. In any case, we will respond
+		// appropriately on the next runQueue iteration, which
+		// will have already been triggered by the queue
+		// update.
+		logger.WithField("State", ctr.State).Debug("container no longer queued by the time we decided to lock it, doing nothing")
+		return
+	}
+	err := sch.queue.Lock(uuid)
+	if err != nil {
+		logger.WithError(err).Warn("error locking container")
+		return
+	}
+	logger.Debug("lock succeeded")
+	ctr, ok := sch.queue.Get(uuid)
+	if !ok {
+		logger.Error("(BUG?) container disappeared from queue after Lock succeeded")
+	} else if ctr.State != arvados.ContainerStateLocked {
+		logger.Warnf("(race?) container has state=%q after Lock succeeded", ctr.State)
+	}
+}
+
+// Acquire a non-blocking lock for specified UUID, returning true if
+// successful. The op argument is used only for debug logs.
+//
+// If the lock is not available, uuidLock arranges to wake up the
+// scheduler after a short delay, so it can retry whatever operation
+// is trying to get the lock (if that operation is still worth doing).
+//
+// This mechanism helps avoid spamming the controller/database with
+// concurrent updates for any single container, even when the
+// scheduler loop is running frequently.
+func (sch *Scheduler) uuidLock(uuid, op string) bool {
+	sch.mtx.Lock()
+	defer sch.mtx.Unlock()
+	logger := sch.logger.WithFields(logrus.Fields{
+		"ContainerUUID": uuid,
+		"Op":            op,
+	})
+	if op, locked := sch.uuidOp[uuid]; locked {
+		logger.Debugf("uuidLock not available, Op=%s in progress", op)
+		// Make sure the scheduler loop wakes up to retry.
+		sch.wakeup.Reset(time.Second / 4)
+		return false
+	}
+	logger.Debug("uuidLock acquired")
+	sch.uuidOp[uuid] = op
+	return true
+}
+
+func (sch *Scheduler) uuidUnlock(uuid string) {
+	sch.mtx.Lock()
+	defer sch.mtx.Unlock()
+	delete(sch.uuidOp, uuid)
+}
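
Note on the locking pattern added by this patch: uuidLock/uuidUnlock give the scheduler a per-container, non-blocking mutex, so at most one queue operation per container is in flight even when runQueue fires frequently; a caller that loses the race simply arms the wakeup timer and retries on a later scheduler pass. The standalone sketch below illustrates that pattern in isolation. It is illustrative only: the miniScheduler type and its field names are invented for this note, although they mirror the mtx, uuidOp, and wakeup state the patch relies on.

package main

import (
	"fmt"
	"sync"
	"time"
)

type miniScheduler struct {
	mtx    sync.Mutex
	uuidOp map[string]string // container UUID -> operation currently in progress
	wakeup *time.Timer       // re-triggers the scheduler loop after a short delay
}

// tryLock mirrors uuidLock: it never blocks; if another operation already
// holds the UUID, it arms the wakeup timer so the caller can retry later.
func (s *miniScheduler) tryLock(uuid, op string) bool {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if prev, busy := s.uuidOp[uuid]; busy {
		fmt.Printf("lock for %s not available, %s in progress\n", uuid, prev)
		s.wakeup.Reset(time.Second / 4)
		return false
	}
	s.uuidOp[uuid] = op
	return true
}

// unlock mirrors uuidUnlock: it releases the per-UUID lock.
func (s *miniScheduler) unlock(uuid string) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	delete(s.uuidOp, uuid)
}

func main() {
	s := &miniScheduler{
		uuidOp: map[string]string{},
		wakeup: time.NewTimer(time.Hour), // placeholder; the real scheduler loop waits on this timer
	}
	uuid := "zzzzz-dz642-000000000000000"  // hypothetical container UUID
	fmt.Println(s.tryLock(uuid, "lock"))   // true: first caller wins
	fmt.Println(s.tryLock(uuid, "lock"))   // false: already in progress, wakeup armed
	s.unlock(uuid)
	fmt.Println(s.tryLock(uuid, "cancel")) // true again after unlock
}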