req.Header.Set("Authorization", "OAuth2 "+ac2.AuthToken)
resp, err = arvados.InsecureHTTPClient.Do(req)
c.Assert(err, check.IsNil)
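+ // Close the response body when done, to avoid leaking the
+ // underlying connection.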
+ defer resp.Body.Close()
err = json.NewDecoder(resp.Body).Decode(&cr)
c.Check(err, check.IsNil)
c.Check(cr.UUID, check.Matches, "z2222-.*")
c.Assert(err, check.IsNil)
req.Header.Set("Content-Type", "application/json")
resp, err := ac1.Do(req)
- c.Assert(err, check.IsNil)
- c.Assert(resp.StatusCode, check.Equals, tt.expectedCode)
+ if c.Check(err, check.IsNil) {
+ c.Assert(resp.StatusCode, check.Equals, tt.expectedCode)
+ resp.Body.Close()
+ }
}
}
var jresp httpserver.ErrorResponse
err := json.NewDecoder(resp.Body).Decode(&jresp)
c.Check(err, check.IsNil)
- c.Assert(jresp.Errors, check.HasLen, 1)
- c.Check(jresp.Errors[0], check.Matches, `.*\(`+respHdr+`\).*`)
+ if c.Check(jresp.Errors, check.HasLen, 1) {
+ c.Check(jresp.Errors[0], check.Matches, `.*\(`+respHdr+`\).*`)
+ }
}
+ resp.Body.Close()
}
}
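// quietAfter503 is how long to wait after an API 503 response before
// raising maxConcurrency again.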
var quietAfter503 = time.Minute
func (sch *Scheduler) runQueue() {
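+ // Snapshot the pool state up front: the sort comparator
+ // below needs to know which containers are already running.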
+ running := sch.pool.Running()
+ unalloc := sch.pool.Unallocated()
+
unsorted, _ := sch.queue.Entries()
sorted := make([]container.QueueEnt, 0, len(unsorted))
for _, ent := range unsorted {
sorted = append(sorted, ent)
}
sort.Slice(sorted, func(i, j int) bool {
+ _, irunning := running[sorted[i].Container.UUID]
+ _, jrunning := running[sorted[j].Container.UUID]
+ if irunning != jrunning {
+ // Ensure the "tryrun" loop (see below) sees
+ // already-scheduled containers first, so that
+ // existing supervisor containers are properly
+ // counted before we decide whether we have
+ // room for new ones.
+ return irunning
+ }
ilocked := sorted[i].Container.State == arvados.ContainerStateLocked
jlocked := sorted[j].Container.State == arvados.ContainerStateLocked
if ilocked != jlocked {
return ilocked
}
})
- running := sch.pool.Running()
- unalloc := sch.pool.Unallocated()
-
if t := sch.client.Last503(); t.After(sch.last503time) {
// API has sent an HTTP 503 response since last time
// we checked. Use current #containers - 1 as the new
// maxConcurrency, i.e., stay just below the level where
// we started seeing 503s.
case arvados.ContainerStateQueued:
if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
logger.Tracef("not locking: already at maxConcurrency %d", sch.maxConcurrency)
- overquota = sorted[i:]
- break tryrun
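+ // Hitting maxConcurrency is not an overquota
+ // condition: skip this container instead of
+ // unlocking it or shutting down nodes.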
+ continue
}
trying++
if unalloc[it] < 1 && sch.pool.AtQuota() {
unalloc[it]--
case arvados.ContainerStateLocked:
if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
- logger.Debugf("not starting: already at maxConcurrency %d", sch.maxConcurrency)
- overquota = sorted[i:]
- break tryrun
+ logger.Tracef("not starting: already at maxConcurrency %d", sch.maxConcurrency)
+ continue
}
trying++
if unalloc[it] > 0 {
}
}
}
+ }
+ if len(overquota) > 0 {
// Shut down idle workers that didn't get any
// containers mapped onto them before we hit quota.
for it, n := range unalloc {
}
}
+// Don't unlock containers or shut down unalloc (booting/idle) nodes
+// just because some 503 errors caused us to reduce maxConcurrency
+// below the current load level.
+//
+// We expect to raise maxConcurrency soon when we stop seeing 503s. If
+// that doesn't happen soon, the idle timeout will take care of the
+// excess nodes.
+func (*SchedulerSuite) TestIdleIn503QuietPeriod(c *check.C) {
+ ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+ queue := test.Queue{
+ ChooseType: chooseType,
+ Containers: []arvados.Container{
+ // scheduled on an instance (but not Running yet)
+ {
+ UUID: test.ContainerUUID(1),
+ Priority: 1000,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 2,
+ RAM: 2 << 30,
+ },
+ },
+ // not yet scheduled
+ {
+ UUID: test.ContainerUUID(2),
+ Priority: 1000,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 2,
+ RAM: 2 << 30,
+ },
+ },
+ // scheduled on an instance (but not Running yet)
+ {
+ UUID: test.ContainerUUID(3),
+ Priority: 1000,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 3,
+ RAM: 3 << 30,
+ },
+ },
+ // not yet scheduled
+ {
+ UUID: test.ContainerUUID(4),
+ Priority: 1000,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 3,
+ RAM: 3 << 30,
+ },
+ },
+ // not yet locked
+ {
+ UUID: test.ContainerUUID(5),
+ Priority: 1000,
+ State: arvados.ContainerStateQueued,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 3,
+ RAM: 3 << 30,
+ },
+ },
+ },
+ }
+ queue.Update()
+ pool := stubPool{
+ quota: 16,
+ unalloc: map[arvados.InstanceType]int{
+ test.InstanceType(2): 2,
+ test.InstanceType(3): 2,
+ },
+ idle: map[arvados.InstanceType]int{
+ test.InstanceType(2): 1,
+ test.InstanceType(3): 1,
+ },
+ running: map[string]time.Time{
+ test.ContainerUUID(1): {},
+ test.ContainerUUID(3): {},
+ },
+ creates: []arvados.InstanceType{},
+ starts: []string{},
+ canCreate: 0,
+ }
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0)
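+ // Simulate a 503 received just now, with maxConcurrency
+ // already reduced below the number of containers in play.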
+ sch.last503time = time.Now()
+ sch.maxConcurrency = 3
+ sch.sync()
+ sch.runQueue()
+ sch.sync()
+
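+ // Expect only the next unscheduled container to start;
+ // nothing is shut down, created, or unlocked during the
+ // 503 quiet period.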
+ c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
+ c.Check(pool.shutdowns, check.Equals, 0)
+ c.Check(pool.creates, check.HasLen, 0)
+ c.Check(queue.StateChanges(), check.HasLen, 0)
+}
+
+// If we somehow have more supervisor containers in Locked state than
+// we should (e.g., the config changed since they started), and some
+// appropriate-sized instances are booting up, unlock the excess
+// supervisor containers, but let the instances keep booting.
+func (*SchedulerSuite) TestUnlockExcessSupervisors(c *check.C) {
+ ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+ queue := test.Queue{
+ ChooseType: chooseType,
+ }
+ for i := 1; i <= 6; i++ {
+ queue.Containers = append(queue.Containers, arvados.Container{
+ UUID: test.ContainerUUID(i),
+ Priority: int64(1000 - i),
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 2,
+ RAM: 2 << 30,
+ },
+ SchedulingParameters: arvados.SchedulingParameters{
+ Supervisor: true,
+ },
+ })
+ }
+ queue.Update()
+ pool := stubPool{
+ quota: 16,
+ unalloc: map[arvados.InstanceType]int{
+ test.InstanceType(2): 2,
+ },
+ idle: map[arvados.InstanceType]int{
+ test.InstanceType(2): 1,
+ },
+ running: map[string]time.Time{
+ test.ContainerUUID(1): {},
+ test.ContainerUUID(2): {},
+ test.ContainerUUID(3): {},
+ test.ContainerUUID(4): {},
+ },
+ creates: []arvados.InstanceType{},
+ starts: []string{},
+ canCreate: 0,
+ }
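+ // The final argument to New (4) is taken here to be the
+ // supervisor limit (cf. MaxSupervisors in the next test).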
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 4)
+ sch.sync()
+ sch.runQueue()
+ sch.sync()
+
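+ // Expect the two excess supervisors (lowest priority: 5
+ // and 6) to be unlocked, while the booting instances are
+ // left alone.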
+ c.Check(pool.starts, check.DeepEquals, []string{})
+ c.Check(pool.shutdowns, check.Equals, 0)
+ c.Check(pool.creates, check.HasLen, 0)
+ c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
+ {UUID: test.ContainerUUID(5), From: "Locked", To: "Queued"},
+ {UUID: test.ContainerUUID(6), From: "Locked", To: "Queued"},
+ })
+}
+
+// Assuming we're not at quota, don't try to shut down idle nodes
+// merely because we have more queued/locked supervisor containers
+// than MaxSupervisors -- it won't help.
+func (*SchedulerSuite) TestExcessSupervisors(c *check.C) {
+ ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+ queue := test.Queue{
+ ChooseType: chooseType,
+ }
+ for i := 1; i <= 8; i++ {
+ queue.Containers = append(queue.Containers, arvados.Container{
+ UUID: test.ContainerUUID(i),
+ Priority: int64(1000 + i),
+ State: arvados.ContainerStateQueued,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 2,
+ RAM: 2 << 30,
+ },
+ SchedulingParameters: arvados.SchedulingParameters{
+ Supervisor: true,
+ },
+ })
+ }
+ for i := 2; i < 4; i++ {
+ queue.Containers[i].State = arvados.ContainerStateLocked
+ }
+ for i := 4; i < 6; i++ {
+ queue.Containers[i].State = arvados.ContainerStateRunning
+ }
+ queue.Update()
+ pool := stubPool{
+ quota: 16,
+ unalloc: map[arvados.InstanceType]int{
+ test.InstanceType(2): 2,
+ },
+ idle: map[arvados.InstanceType]int{
+ test.InstanceType(2): 1,
+ },
+ running: map[string]time.Time{
+ test.ContainerUUID(5): {},
+ test.ContainerUUID(6): {},
+ },
+ creates: []arvados.InstanceType{},
+ starts: []string{},
+ canCreate: 0,
+ }
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 4)
+ sch.sync()
+ sch.runQueue()
+ sch.sync()
+
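+ // Expect the two locked supervisors to start; the queued
+ // ones stay queued, and no idle nodes are shut down.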
+ c.Check(pool.starts, check.HasLen, 2)
+ c.Check(pool.shutdowns, check.Equals, 0)
+ c.Check(pool.creates, check.HasLen, 0)
+ c.Check(queue.StateChanges(), check.HasLen, 0)
+}
+
// Don't flap lock/unlock when equal-priority containers compete for
// limited workers.
//