func (p *stubPool) AtQuota() bool {
p.Lock()
defer p.Unlock()
- return len(p.unalloc)+len(p.running)+len(p.unknown) >= p.quota
+ // unalloc and unknown map instance type -> count, so the instance
+ // total is the sum of their values, not the number of keys. running
+ // maps container UUID -> start time, so its len() is still correct.
+ n := len(p.running)
+ for _, nn := range p.unalloc {
+ n += nn
+ }
+ for _, nn := range p.unknown {
+ n += nn
+ }
+ return n >= p.quota
}
+ // Stub subscription API: Subscribe hands out the pool's single notify
+ // channel; Unsubscribe is a no-op.
func (p *stubPool) Subscribe() <-chan struct{} { return p.notify }
func (p *stubPool) Unsubscribe(<-chan struct{}) {}
running: map[string]time.Time{},
canCreate: 0,
}
- New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond).runQueue()
+ New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0).runQueue()
c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(1), test.InstanceType(1), test.InstanceType(1)})
c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})
c.Check(pool.running, check.HasLen, 1)
// call Create().
func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
- for quota := 1; quota < 3; quota++ {
+ for quota := 1; quota <= 3; quota++ {
c.Logf("quota=%d", quota)
- shouldCreate := []arvados.InstanceType{}
- for i := 1; i < quota; i++ {
- shouldCreate = append(shouldCreate, test.InstanceType(3))
- }
queue := test.Queue{
ChooseType: chooseType,
Containers: []arvados.Container{
starts: []string{},
canCreate: 0,
}
- sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond)
- sch.runQueue()
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0)
sch.sync()
sch.runQueue()
sch.sync()
- c.Check(pool.creates, check.DeepEquals, shouldCreate)
- if len(shouldCreate) == 0 {
- c.Check(pool.starts, check.DeepEquals, []string{})
- } else {
+ switch quota {
+ case 1, 2:
+ // Can't create a type3 node for ctr3, so we
+ // shut down an unallocated node (type2), and
+ // unlock both containers.
+ c.Check(pool.starts, check.HasLen, 0)
+ c.Check(pool.shutdowns, check.Equals, 1)
+ c.Check(pool.creates, check.HasLen, 0)
+ c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
+ {UUID: test.ContainerUUID(3), From: "Locked", To: "Queued"},
+ {UUID: test.ContainerUUID(2), From: "Locked", To: "Queued"},
+ })
+ case 3:
+ // Creating a type3 instance works, so we
+ // start ctr2 on a type2 instance, and leave
+ // ctr3 locked while we wait for the new
+ // instance to come up.
c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
+ c.Check(pool.shutdowns, check.Equals, 0)
+ c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(3)})
+ c.Check(queue.StateChanges(), check.HasLen, 0)
+ default:
+ panic("test not written for quota>3")
}
- c.Check(pool.shutdowns, check.Equals, 3-quota)
- c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
- {UUID: "zzzzz-dz642-000000000000003", From: "Locked", To: "Queued"},
- {UUID: "zzzzz-dz642-000000000000002", From: "Locked", To: "Queued"},
- })
}
}
pool := stubPool{
quota: 2,
unalloc: map[arvados.InstanceType]int{
- test.InstanceType(3): 1,
+ test.InstanceType(3): 2,
},
idle: map[arvados.InstanceType]int{
- test.InstanceType(3): 1,
+ test.InstanceType(3): 2,
},
running: map[string]time.Time{},
creates: []arvados.InstanceType{},
starts: []string{},
- canCreate: 1,
+ canCreate: 0,
}
- sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond)
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0)
for i := 0; i < 30; i++ {
sch.runQueue()
sch.sync()
time.Sleep(time.Millisecond)
}
c.Check(pool.shutdowns, check.Equals, 0)
- c.Check(pool.starts, check.HasLen, 1)
+ c.Check(pool.starts, check.HasLen, 2)
unlocked := map[string]int{}
for _, chg := range queue.StateChanges() {
if chg.To == arvados.ContainerStateQueued {
},
}
queue.Update()
- New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond).runQueue()
+ New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0).runQueue()
c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(2), test.InstanceType(1)})
c.Check(pool.starts, check.DeepEquals, []string{uuids[6], uuids[5], uuids[3], uuids[2]})
running := map[string]bool{}
},
}
queue.Update()
- sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond)
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0)
c.Check(pool.running, check.HasLen, 1)
sch.sync()
for deadline := time.Now().Add(time.Second); len(pool.Running()) > 0 && time.Now().Before(deadline); time.Sleep(time.Millisecond) {
pool := stubPool{
unalloc: map[arvados.InstanceType]int{test.InstanceType(1): 1},
}
- sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond)
+ sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0)
sch.runQueue()
sch.updateMetrics()
// 'over quota' metric will be 1 because no workers are available and canCreate defaults
// to zero.
pool = stubPool{}
- sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond)
+ sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0)
sch.runQueue()
sch.updateMetrics()
unalloc: map[arvados.InstanceType]int{test.InstanceType(1): 1},
running: map[string]time.Time{},
}
- sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond)
+ sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0)
sch.runQueue()
sch.updateMetrics()
c.Check(int(testutil.ToFloat64(sch.mLongestWaitTimeSinceQueue)), check.Equals, 0)
}
+
+ // Start the priority 4, 3, and 1 containers on idle nodes. The priority 2
+ // container is also a supervisor, but starting it would exceed the
+ // supervisor limit passed to New below, so it is skipped.
+ func (*SchedulerSuite) TestSkipSupervisors(c *check.C) {
+ ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+ queue := test.Queue{
+ ChooseType: chooseType,
+ Containers: []arvados.Container{
+ {
+ UUID: test.ContainerUUID(1),
+ Priority: 1,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 1,
+ RAM: 1 << 30,
+ },
+ },
+ {
+ UUID: test.ContainerUUID(2),
+ Priority: 2,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 1,
+ RAM: 1 << 30,
+ },
+ SchedulingParameters: arvados.SchedulingParameters{
+ Supervisor: true,
+ },
+ },
+ {
+ UUID: test.ContainerUUID(3),
+ Priority: 3,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 1,
+ RAM: 1 << 30,
+ },
+ SchedulingParameters: arvados.SchedulingParameters{
+ Supervisor: true,
+ },
+ },
+ {
+ UUID: test.ContainerUUID(4),
+ Priority: 4,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 1,
+ RAM: 1 << 30,
+ },
+ SchedulingParameters: arvados.SchedulingParameters{
+ Supervisor: true,
+ },
+ },
+ },
+ }
+ queue.Update()
+ pool := stubPool{
+ quota: 1000,
+ unalloc: map[arvados.InstanceType]int{
+ test.InstanceType(1): 4,
+ test.InstanceType(2): 4,
+ },
+ idle: map[arvados.InstanceType]int{
+ test.InstanceType(1): 4,
+ test.InstanceType(2): 4,
+ },
+ running: map[string]time.Time{},
+ canCreate: 0,
+ }
+ // NOTE(review): the trailing 2 appears to be the maximum number of
+ // supervisor containers allowed to run concurrently — confirm against
+ // New's signature in the non-test scheduler code.
+ New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 2).runQueue()
+ c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType(nil))
+ c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4), test.ContainerUUID(3), test.ContainerUUID(1)})
+ }