X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/407fc461d20ece8b11b7b56f29a3caff3083ff8d..00f3f818027bdc69178bb54e49802baf20d9527d:/lib/dispatchcloud/scheduler/run_queue_test.go

diff --git a/lib/dispatchcloud/scheduler/run_queue_test.go b/lib/dispatchcloud/scheduler/run_queue_test.go
index fd1d0a870b..3278c7de69 100644
--- a/lib/dispatchcloud/scheduler/run_queue_test.go
+++ b/lib/dispatchcloud/scheduler/run_queue_test.go
@@ -52,7 +52,14 @@ type stubPool struct {
 func (p *stubPool) AtQuota() bool {
 	p.Lock()
 	defer p.Unlock()
-	return len(p.unalloc)+len(p.running)+len(p.unknown) >= p.quota
+	n := len(p.running)
+	for _, nn := range p.unalloc {
+		n += nn
+	}
+	for _, nn := range p.unknown {
+		n += nn
+	}
+	return n >= p.quota
 }
 func (p *stubPool) Subscribe() <-chan struct{} { return p.notify }
 func (p *stubPool) Unsubscribe(<-chan struct{}) {}
@@ -188,7 +195,7 @@ func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
 		running:   map[string]time.Time{},
 		canCreate: 0,
 	}
-	New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond).runQueue()
+	New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0).runQueue()
 	c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(1), test.InstanceType(1), test.InstanceType(1)})
 	c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})
 	c.Check(pool.running, check.HasLen, 1)
@@ -201,12 +208,8 @@ func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
 // call Create().
 func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
 	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
-	for quota := 1; quota < 3; quota++ {
+	for quota := 1; quota <= 3; quota++ {
 		c.Logf("quota=%d", quota)
-		shouldCreate := []arvados.InstanceType{}
-		for i := 1; i < quota; i++ {
-			shouldCreate = append(shouldCreate, test.InstanceType(3))
-		}
 		queue := test.Queue{
 			ChooseType: chooseType,
 			Containers: []arvados.Container{
@@ -244,18 +247,96 @@ func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
 			starts:    []string{},
 			canCreate: 0,
 		}
-		New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond).runQueue()
-		c.Check(pool.creates, check.DeepEquals, shouldCreate)
-		if len(shouldCreate) == 0 {
-			c.Check(pool.starts, check.DeepEquals, []string{})
-			c.Check(pool.shutdowns, check.Not(check.Equals), 0)
-		} else {
+		sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0)
+		sch.sync()
+		sch.runQueue()
+		sch.sync()
+		switch quota {
+		case 1, 2:
+			// Can't create a type3 node for ctr3, so we
+			// shutdown an unallocated node (type2), and
+			// unlock both containers.
+			c.Check(pool.starts, check.HasLen, 0)
+			c.Check(pool.shutdowns, check.Equals, 1)
+			c.Check(pool.creates, check.HasLen, 0)
+			c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
+				{UUID: test.ContainerUUID(3), From: "Locked", To: "Queued"},
+				{UUID: test.ContainerUUID(2), From: "Locked", To: "Queued"},
+			})
+		case 3:
+			// Creating a type3 instance works, so we
+			// start ctr2 on a type2 instance, and leave
+			// ctr3 locked while we wait for the new
+			// instance to come up.
 			c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
 			c.Check(pool.shutdowns, check.Equals, 0)
+			c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(3)})
+			c.Check(queue.StateChanges(), check.HasLen, 0)
+		default:
+			panic("test not written for quota>3")
 		}
 	}
 }
 
+// Don't flap lock/unlock when equal-priority containers compete for
+// limited workers.
+//
+// (Unless we use FirstSeenAt as a secondary sort key, each runQueue()
+// tends to choose a different one of the equal-priority containers as
+// the "first" one that should be locked, and unlock the one it chose
+// last time. This generates logging noise, and fails containers by
+// reaching MaxDispatchAttempts quickly.)
+func (*SchedulerSuite) TestEqualPriorityContainers(c *check.C) {
+	logger := ctxlog.TestLogger(c)
+	ctx := ctxlog.Context(context.Background(), logger)
+	queue := test.Queue{
+		ChooseType: chooseType,
+		Logger:     logger,
+	}
+	for i := 0; i < 8; i++ {
+		queue.Containers = append(queue.Containers, arvados.Container{
+			UUID:     test.ContainerUUID(i),
+			Priority: 333,
+			State:    arvados.ContainerStateQueued,
+			RuntimeConstraints: arvados.RuntimeConstraints{
+				VCPUs: 3,
+				RAM:   3 << 30,
+			},
+		})
+	}
+	queue.Update()
+	pool := stubPool{
+		quota: 2,
+		unalloc: map[arvados.InstanceType]int{
+			test.InstanceType(3): 2,
+		},
+		idle: map[arvados.InstanceType]int{
+			test.InstanceType(3): 2,
+		},
+		running:   map[string]time.Time{},
+		creates:   []arvados.InstanceType{},
+		starts:    []string{},
+		canCreate: 0,
+	}
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0)
+	for i := 0; i < 30; i++ {
+		sch.runQueue()
+		sch.sync()
+		time.Sleep(time.Millisecond)
+	}
+	c.Check(pool.shutdowns, check.Equals, 0)
+	c.Check(pool.starts, check.HasLen, 2)
+	unlocked := map[string]int{}
+	for _, chg := range queue.StateChanges() {
+		if chg.To == arvados.ContainerStateQueued {
+			unlocked[chg.UUID]++
+		}
+	}
+	for uuid, count := range unlocked {
+		c.Check(count, check.Equals, 1, check.Commentf("%s", uuid))
+	}
+}
+
 // Start lower-priority containers while waiting for new/existing
 // workers to come up for higher-priority containers.
 func (*SchedulerSuite) TestStartWhileCreating(c *check.C) {
@@ -339,7 +420,7 @@ func (*SchedulerSuite) TestStartWhileCreating(c *check.C) {
 		},
 	}
 	queue.Update()
-	New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond).runQueue()
+	New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0).runQueue()
 	c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(2), test.InstanceType(1)})
 	c.Check(pool.starts, check.DeepEquals, []string{uuids[6], uuids[5], uuids[3], uuids[2]})
 	running := map[string]bool{}
@@ -383,7 +464,7 @@ func (*SchedulerSuite) TestKillNonexistentContainer(c *check.C) {
 		},
 	}
 	queue.Update()
-	sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0)
 	c.Check(pool.running, check.HasLen, 1)
 	sch.sync()
 	for deadline := time.Now().Add(time.Second); len(pool.Running()) > 0 && time.Now().Before(deadline); time.Sleep(time.Millisecond) {
@@ -416,7 +497,7 @@ func (*SchedulerSuite) TestContainersMetrics(c *check.C) {
 	pool := stubPool{
 		unalloc: map[arvados.InstanceType]int{test.InstanceType(1): 1},
 	}
-	sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0)
 	sch.runQueue()
 	sch.updateMetrics()
 
@@ -428,7 +509,7 @@ func (*SchedulerSuite) TestContainersMetrics(c *check.C) {
 	// 'over quota' metric will be 1 because no workers are available and canCreate defaults
 	// to zero.
 	pool = stubPool{}
-	sch = New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+	sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0)
 	sch.runQueue()
 	sch.updateMetrics()
 
@@ -461,9 +542,81 @@ func (*SchedulerSuite) TestContainersMetrics(c *check.C) {
 		unalloc: map[arvados.InstanceType]int{test.InstanceType(1): 1},
 		running: map[string]time.Time{},
 	}
-	sch = New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+	sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0)
 	sch.runQueue()
 	sch.updateMetrics()
 
 	c.Check(int(testutil.ToFloat64(sch.mLongestWaitTimeSinceQueue)), check.Equals, 0)
 }
+
+// Assign priority=4, 3 and 1 containers to idle nodes. Ignore the supervisor at priority 2.
+func (*SchedulerSuite) TestSkipSupervisors(c *check.C) {
+	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+	queue := test.Queue{
+		ChooseType: chooseType,
+		Containers: []arvados.Container{
+			{
+				UUID:     test.ContainerUUID(1),
+				Priority: 1,
+				State:    arvados.ContainerStateLocked,
+				RuntimeConstraints: arvados.RuntimeConstraints{
+					VCPUs: 1,
+					RAM:   1 << 30,
+				},
+			},
+			{
+				UUID:     test.ContainerUUID(2),
+				Priority: 2,
+				State:    arvados.ContainerStateLocked,
+				RuntimeConstraints: arvados.RuntimeConstraints{
+					VCPUs: 1,
+					RAM:   1 << 30,
+				},
+				SchedulingParameters: arvados.SchedulingParameters{
+					Supervisor: true,
+				},
+			},
+			{
+				UUID:     test.ContainerUUID(3),
+				Priority: 3,
+				State:    arvados.ContainerStateLocked,
+				RuntimeConstraints: arvados.RuntimeConstraints{
+					VCPUs: 1,
+					RAM:   1 << 30,
+				},
+				SchedulingParameters: arvados.SchedulingParameters{
+					Supervisor: true,
+				},
+			},
+			{
+				UUID:     test.ContainerUUID(4),
+				Priority: 4,
+				State:    arvados.ContainerStateLocked,
+				RuntimeConstraints: arvados.RuntimeConstraints{
+					VCPUs: 1,
+					RAM:   1 << 30,
+				},
+				SchedulingParameters: arvados.SchedulingParameters{
+					Supervisor: true,
+				},
+			},
+		},
+	}
+	queue.Update()
+	pool := stubPool{
+		quota: 1000,
+		unalloc: map[arvados.InstanceType]int{
+			test.InstanceType(1): 4,
+			test.InstanceType(2): 4,
+		},
+		idle: map[arvados.InstanceType]int{
+			test.InstanceType(1): 4,
+			test.InstanceType(2): 4,
+		},
+		running:   map[string]time.Time{},
+		canCreate: 0,
+	}
+	New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 2).runQueue()
+	c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType(nil))
+	c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4), test.ContainerUUID(3), test.ContainerUUID(1)})
+}
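
Note (not part of the diff above): the TestEqualPriorityContainers comment refers to using FirstSeenAt as a secondary sort key so that successive runQueue() passes pick the same "first" container instead of flapping lock/unlock between equal-priority containers. Below is a minimal, self-contained Go sketch of that tie-breaking idea; the type and field names are hypothetical illustrations, not the scheduler's actual internals (the real ordering presumably lives in run_queue.go and may differ).

// Illustrative sketch only: sort by descending priority, then by
// FirstSeenAt, so equal-priority containers keep a stable order.
package main

import (
	"fmt"
	"sort"
	"time"
)

// queuedContainer is a hypothetical stand-in for a queued container
// as seen by a dispatcher.
type queuedContainer struct {
	UUID        string
	Priority    int
	FirstSeenAt time.Time // when the dispatcher first saw the container
}

// sortQueue orders containers by descending priority, breaking ties by
// FirstSeenAt so that repeated scheduling passes choose the same
// "first" container instead of flapping between equals.
func sortQueue(q []queuedContainer) {
	sort.Slice(q, func(i, j int) bool {
		if q[i].Priority != q[j].Priority {
			return q[i].Priority > q[j].Priority
		}
		return q[i].FirstSeenAt.Before(q[j].FirstSeenAt)
	})
}

func main() {
	t0 := time.Now()
	q := []queuedContainer{
		{UUID: "ctr-b", Priority: 333, FirstSeenAt: t0.Add(2 * time.Second)},
		{UUID: "ctr-a", Priority: 333, FirstSeenAt: t0.Add(1 * time.Second)},
		{UUID: "ctr-c", Priority: 500, FirstSeenAt: t0.Add(3 * time.Second)},
	}
	sortQueue(q)
	for _, c := range q {
		fmt.Println(c.UUID) // ctr-c, ctr-a, ctr-b: same order on every pass
	}
}

Without the secondary key, two containers with equal priority compare as "equal", and an unstable sort may present them in a different order on each pass, which is the lock/unlock churn the new test guards against.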