X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/ca6c0353c236d5fc0f829880e845cbd8489ea9ad..84c80680c3ac6b0b1cd44de40a05f77eb82ca1c9:/lib/dispatchcloud/scheduler/run_queue.go

diff --git a/lib/dispatchcloud/scheduler/run_queue.go b/lib/dispatchcloud/scheduler/run_queue.go
index e9fc5f9021..db6e97b596 100644
--- a/lib/dispatchcloud/scheduler/run_queue.go
+++ b/lib/dispatchcloud/scheduler/run_queue.go
@@ -13,6 +13,8 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
+var quietAfter503 = time.Minute
+
 func (sch *Scheduler) runQueue() {
 	unsorted, _ := sch.queue.Entries()
 	sorted := make([]container.QueueEnt, 0, len(unsorted))
@@ -20,7 +22,19 @@ func (sch *Scheduler) runQueue() {
 		sorted = append(sorted, ent)
 	}
 	sort.Slice(sorted, func(i, j int) bool {
-		if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
+		ilocked := sorted[i].Container.State == arvados.ContainerStateLocked
+		jlocked := sorted[j].Container.State == arvados.ContainerStateLocked
+		if ilocked != jlocked {
+			// Give precedence to containers that we have
+			// already locked, even if higher-priority
+			// containers have since arrived in the
+			// queue. This avoids undesirable queue churn
+			// effects including extra lock/unlock cycles
+			// and bringing up new instances and quickly
+			// shutting them down to make room for
+			// different instance sizes.
+			return ilocked
+		} else if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
 			return pi > pj
 		} else {
 			// When containers have identical priority,
@@ -35,29 +49,82 @@ func (sch *Scheduler) runQueue() {
 	running := sch.pool.Running()
 	unalloc := sch.pool.Unallocated()
 
+	if t := sch.client.Last503(); t.After(sch.last503time) {
+		// API has sent an HTTP 503 response since last time
+		// we checked. Use current #containers - 1 as
+		// maxConcurrency, i.e., try to stay just below the
+		// level where we see 503s.
+		sch.last503time = t
+		if newlimit := len(running) - 1; newlimit < 1 {
+			sch.maxConcurrency = 1
+		} else {
+			sch.maxConcurrency = newlimit
+		}
+	} else if sch.maxConcurrency > 0 && time.Since(sch.last503time) > quietAfter503 {
+		// If we haven't seen any 503 errors lately, raise
+		// limit to ~10% beyond the current workload.
+		//
+		// As we use the added 10% to schedule more
+		// containers, len(running) will increase and we'll
+		// push the limit up further. Soon enough,
+		// maxConcurrency will get high enough to schedule the
+		// entire queue, hit pool quota, or get 503s again.
+		max := len(running)*11/10 + 1
+		if sch.maxConcurrency < max {
+			sch.maxConcurrency = max
+		}
+	}
+	if sch.last503time.IsZero() {
+		sch.mLast503Time.Set(0)
+	} else {
+		sch.mLast503Time.Set(float64(sch.last503time.Unix()))
+	}
+	sch.mMaxContainerConcurrency.Set(float64(sch.maxConcurrency))
+
 	sch.logger.WithFields(logrus.Fields{
-		"Containers": len(sorted),
-		"Processes":  len(running),
+		"Containers":     len(sorted),
+		"Processes":      len(running),
+		"maxConcurrency": sch.maxConcurrency,
 	}).Debug("runQueue")
 
 	dontstart := map[arvados.InstanceType]bool{}
-	var overquota []container.QueueEnt // entries that are unmappable because of worker pool quota
+	var overquota []container.QueueEnt    // entries that are unmappable because of worker pool quota
+	var overmaxsuper []container.QueueEnt // unmappable because max supervisors (these are not included in overquota)
 	var containerAllocatedWorkerBootingCount int
 
+	// trying is #containers running + #containers we're trying to
+	// start. We stop trying to start more containers if this
+	// reaches the dynamic maxConcurrency limit.
+	trying := len(running)
+
+	supervisors := 0
+
 tryrun:
-	for i, ctr := range sorted {
-		ctr, it := ctr.Container, ctr.InstanceType
+	for i, ent := range sorted {
+		ctr, it := ent.Container, ent.InstanceType
 		logger := sch.logger.WithFields(logrus.Fields{
 			"ContainerUUID": ctr.UUID,
 			"InstanceType":  it.Name,
 		})
+		if ctr.SchedulingParameters.Supervisor {
+			supervisors += 1
+			if sch.maxSupervisors > 0 && supervisors > sch.maxSupervisors {
+				overmaxsuper = append(overmaxsuper, sorted[i])
+				continue
+			}
+		}
 		if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
 			continue
 		}
 		switch ctr.State {
 		case arvados.ContainerStateQueued:
+			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
+				logger.Tracef("not locking: already at maxConcurrency %d", sch.maxConcurrency)
+				continue
+			}
+			trying++
 			if unalloc[it] < 1 && sch.pool.AtQuota() {
-				logger.Debug("not locking: AtQuota and no unalloc workers")
+				logger.Trace("not locking: AtQuota and no unalloc workers")
 				overquota = sorted[i:]
 				break tryrun
 			}
@@ -68,6 +135,11 @@ tryrun:
 			go sch.lockContainer(logger, ctr.UUID)
 			unalloc[it]--
 		case arvados.ContainerStateLocked:
+			if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
+				logger.Tracef("not starting: already at maxConcurrency %d", sch.maxConcurrency)
+				continue
+			}
+			trying++
 			if unalloc[it] > 0 {
 				unalloc[it]--
 			} else if sch.pool.AtQuota() {
@@ -78,10 +150,12 @@ tryrun:
 				logger.Trace("overquota")
 				overquota = sorted[i:]
 				break tryrun
-			} else if logger.Info("creating new instance"); sch.pool.Create(it) {
+			} else if sch.pool.Create(it) {
 				// Success. (Note pool.Create works
 				// asynchronously and does its own
-				// logging, so we don't need to.)
+				// logging about the eventual outcome,
+				// so we don't need to.)
+				logger.Info("creating new instance")
 			} else {
 				// Failed despite not being at quota,
 				// e.g., cloud ops throttled. TODO:
@@ -100,8 +174,10 @@ tryrun:
 			} else if sch.pool.KillContainer(ctr.UUID, "about to start") {
 				logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
 			} else if sch.pool.StartContainer(it, ctr) {
+				logger.Trace("StartContainer => true")
 				// Success.
 			} else {
+				logger.Trace("StartContainer => false")
 				containerAllocatedWorkerBootingCount += 1
 				dontstart[it] = true
 			}
@@ -109,16 +185,19 @@ tryrun:
 	}
 
 	sch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))
-	sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota)))
+	sch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota) + len(overmaxsuper)))
 
-	if len(overquota) > 0 {
+	if len(overquota)+len(overmaxsuper) > 0 {
 		// Unlock any containers that are unmappable while
-		// we're at quota.
-		for _, ctr := range overquota {
+		// we're at quota (but if they have already been
+		// scheduled and they're loading docker images etc.,
+		// let them run).
+		for _, ctr := range append(overmaxsuper, overquota...) {
 			ctr := ctr.Container
-			if ctr.State == arvados.ContainerStateLocked {
+			_, toolate := running[ctr.UUID]
+			if ctr.State == arvados.ContainerStateLocked && !toolate {
 				logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
-				logger.Debug("unlock because pool capacity is used by higher priority containers")
+				logger.Info("unlock because pool capacity is used by higher priority containers")
 				err := sch.queue.Unlock(ctr.UUID)
 				if err != nil {
 					logger.WithError(err).Warn("error unlocking")
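
For context, below is a minimal standalone sketch of the adaptive concurrency rule this diff adds to runQueue: on a fresh 503 from the API, clamp the limit to one below the current workload (but never below 1); once no 503 has been seen for quietAfter503, allow roughly 10% headroom above the current workload each pass. The sketch is not Arvados code; the limiter type, its fields, and the adjust method are hypothetical names used only for illustration.

package main

import (
	"fmt"
	"time"
)

// limiter is a hypothetical stand-in for the scheduler fields this
// diff uses (maxConcurrency, last503time, quietAfter503).
type limiter struct {
	maxConcurrency int           // 0 means no limit has been set yet
	last503time    time.Time     // most recent 503 already accounted for
	quietAfter503  time.Duration // quiet period before raising the limit
}

// adjust mirrors the rule added in this diff: on a newly observed 503,
// clamp the limit to max(1, running-1); otherwise, once no 503 has been
// seen for quietAfter503, grow the limit to ~10% above the current
// workload (running*11/10 + 1).
func (l *limiter) adjust(last503 time.Time, running int) {
	if last503.After(l.last503time) {
		l.last503time = last503
		if newlimit := running - 1; newlimit < 1 {
			l.maxConcurrency = 1
		} else {
			l.maxConcurrency = newlimit
		}
	} else if l.maxConcurrency > 0 && time.Since(l.last503time) > l.quietAfter503 {
		if max := running*11/10 + 1; l.maxConcurrency < max {
			l.maxConcurrency = max
		}
	}
}

func main() {
	l := limiter{quietAfter503: time.Minute}
	l.adjust(time.Now(), 40)      // a 503 arrives while 40 containers are running
	fmt.Println(l.maxConcurrency) // 39: stay just below the level that triggered 503s
}

Because the limit only ratchets up while no 503s are arriving, the scheduler converges on a concurrency level just under the point where the API starts shedding load, instead of oscillating between overload and idle capacity.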