c.Check(queue.StateChanges(), check.HasLen, 0)
}
+// If we somehow have more supervisor containers in Locked state than
+// we should (e.g., config changed since they started), and some
+// appropriate-sized instances booting up, unlock the excess
+// supervisor containers, but let the instances keep booting.
+func (*SchedulerSuite) TestUnlockExcessSupervisors(c *check.C) {
+ ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+ queue := test.Queue{
+ ChooseType: chooseType,
+ }
+ // Six supervisor containers, all already in Locked state, with
+ // strictly decreasing priority (1000-i) -- so containers 5 and 6
+ // are the lowest-priority ones and should be the excess that gets
+ // unlocked.
+ for i := 1; i <= 6; i++ {
+ queue.Containers = append(queue.Containers, arvados.Container{
+ UUID: test.ContainerUUID(i),
+ Priority: int64(1000 - i),
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 2,
+ RAM: 2 << 30,
+ },
+ SchedulingParameters: arvados.SchedulingParameters{
+ Supervisor: true,
+ },
+ })
+ }
+ queue.Update()
+ // Pool state: containers 1-4 are already running, and there are
+ // unallocated instances of the right size (i.e., still booting).
+ // canCreate: 0 means the stub pool will not permit creating any
+ // additional instances -- presumably to prove the scheduler does
+ // not react to the excess by provisioning; TODO confirm stubPool
+ // semantics.
+ pool := stubPool{
+ quota: 16,
+ unalloc: map[arvados.InstanceType]int{
+ test.InstanceType(2): 2,
+ },
+ idle: map[arvados.InstanceType]int{
+ test.InstanceType(2): 1,
+ },
+ running: map[string]time.Time{
+ test.ContainerUUID(1): {},
+ test.ContainerUUID(2): {},
+ test.ContainerUUID(3): {},
+ test.ContainerUUID(4): {},
+ },
+ creates: []arvados.InstanceType{},
+ starts: []string{},
+ canCreate: 0,
+ }
+ // The final argument (4) appears to be the supervisor concurrency
+ // limit, making containers 5 and 6 excess -- TODO confirm against
+ // New()'s signature.
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 4)
+ // sync before and after runQueue, mirroring the scheduler's normal
+ // operating sequence.
+ sch.sync()
+ sch.runQueue()
+ sch.sync()
+
+ // Expect no new container starts, no instance shutdowns (the
+ // booting instances are left alone), no instance creations, and
+ // exactly the two lowest-priority excess supervisors returned to
+ // the Queued state.
+ c.Check(pool.starts, check.DeepEquals, []string{})
+ c.Check(pool.shutdowns, check.Equals, 0)
+ c.Check(pool.creates, check.HasLen, 0)
+ c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
+ {UUID: test.ContainerUUID(5), From: "Locked", To: "Queued"},
+ {UUID: test.ContainerUUID(6), From: "Locked", To: "Queued"},
+ })
+}
+
// Don't flap lock/unlock when equal-priority containers compete for
// limited workers.
//