19973: Fix tests.
author Tom Clegg <tom@curii.com>
Thu, 16 Feb 2023 05:24:46 +0000 (00:24 -0500)
committer Tom Clegg <tom@curii.com>
Thu, 16 Feb 2023 05:24:46 +0000 (00:24 -0500)
Stub pool AtQuota() was miscounting instances: len() on the unalloc and unknown maps counts distinct instance types, not instances, so the per-type counts must be summed.

Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@curii.com>
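
A minimal sketch of the miscount, with hypothetical data (the real maps are keyed by arvados.InstanceType, as in the test fixtures below): len() on a per-type count map returns the number of distinct instance types, while the quota check needs the total number of instances, i.e. the sum of the per-type counts.

    package main

    import "fmt"

    func main() {
            // One unallocated type2 instance and four unallocated type3 instances.
            unalloc := map[string]int{"type2": 1, "type3": 4}

            // Wrong: len() counts map keys, i.e. instance types.
            fmt.Println(len(unalloc)) // 2

            // Right: sum the per-type counts to get instances.
            n := 0
            for _, nn := range unalloc {
                    n += nn
            }
            fmt.Println(n) // 5
    }

With quota=3, the old expression reported "under quota" (2 < 3) even though five instances exist; summing (5 >= 3) matches what the real pool reports.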

lib/dispatchcloud/scheduler/run_queue.go
lib/dispatchcloud/scheduler/run_queue_test.go

diff --git a/lib/dispatchcloud/scheduler/run_queue.go b/lib/dispatchcloud/scheduler/run_queue.go
index d0074ae28ec0c59d7887e6aa5cbfa4bd687d1e70..1e5ac2e0466b97730d5eccc9620134d021a66c20 100644
--- a/lib/dispatchcloud/scheduler/run_queue.go
+++ b/lib/dispatchcloud/scheduler/run_queue.go
@@ -154,8 +154,10 @@ tryrun:
                        } else if sch.pool.KillContainer(ctr.UUID, "about to start") {
                                logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
                        } else if sch.pool.StartContainer(it, ctr) {
+                               logger.Trace("StartContainer => true")
                                // Success.
                        } else {
+                               logger.Trace("StartContainer => false")
                                containerAllocatedWorkerBootingCount += 1
                                dontstart[it] = true
                        }
diff --git a/lib/dispatchcloud/scheduler/run_queue_test.go b/lib/dispatchcloud/scheduler/run_queue_test.go
index c6ed2187e5db62efadef2e3337127c0d04e81323..855399337589121c0e26d361febcde3b897fe33e 100644
--- a/lib/dispatchcloud/scheduler/run_queue_test.go
+++ b/lib/dispatchcloud/scheduler/run_queue_test.go
@@ -52,7 +52,14 @@ type stubPool struct {
 func (p *stubPool) AtQuota() bool {
        p.Lock()
        defer p.Unlock()
-       return len(p.unalloc)+len(p.running)+len(p.unknown) >= p.quota
+       n := len(p.running)
+       for _, nn := range p.unalloc {
+               n += nn
+       }
+       for _, nn := range p.unknown {
+               n += nn
+       }
+       return n >= p.quota
 }
 func (p *stubPool) Subscribe() <-chan struct{}  { return p.notify }
 func (p *stubPool) Unsubscribe(<-chan struct{}) {}
@@ -201,12 +208,8 @@ func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
 // call Create().
 func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
        ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
-       for quota := 1; quota < 3; quota++ {
+       for quota := 1; quota <= 3; quota++ {
                c.Logf("quota=%d", quota)
-               shouldCreate := []arvados.InstanceType{}
-               for i := 1; i < quota; i++ {
-                       shouldCreate = append(shouldCreate, test.InstanceType(3))
-               }
                queue := test.Queue{
                        ChooseType: chooseType,
                        Containers: []arvados.Container{
@@ -245,21 +248,33 @@ func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
                        canCreate: 0,
                }
                sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond)
-               sch.runQueue()
                sch.sync()
                sch.runQueue()
                sch.sync()
-               c.Check(pool.creates, check.DeepEquals, shouldCreate)
-               if len(shouldCreate) == 0 {
-                       c.Check(pool.starts, check.DeepEquals, []string{})
-               } else {
+               switch quota {
+               case 1, 2:
+                       // Can't create a type3 node for ctr3, so we
+                       // shutdown an unallocated node (type2), and
+                       // unlock both containers.
+                       c.Check(pool.starts, check.HasLen, 0)
+                       c.Check(pool.shutdowns, check.Equals, 1)
+                       c.Check(pool.creates, check.HasLen, 0)
+                       c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
+                               {UUID: test.ContainerUUID(3), From: "Locked", To: "Queued"},
+                               {UUID: test.ContainerUUID(2), From: "Locked", To: "Queued"},
+                       })
+               case 3:
+                       // Creating a type3 instance works, so we
+                       // start ctr2 on a type2 instance, and leave
+                       // ctr3 locked while we wait for the new
+                       // instance to come up.
                        c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
+                       c.Check(pool.shutdowns, check.Equals, 0)
+                       c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(3)})
+                       c.Check(queue.StateChanges(), check.HasLen, 0)
+               default:
+                       panic("test not written for quota>3")
                }
-               c.Check(pool.shutdowns, check.Equals, 3-quota)
-               c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
-                       {UUID: "zzzzz-dz642-000000000000003", From: "Locked", To: "Queued"},
-                       {UUID: "zzzzz-dz642-000000000000002", From: "Locked", To: "Queued"},
-               })
        }
 }
 
@@ -293,15 +308,15 @@ func (*SchedulerSuite) TestEqualPriorityContainers(c *check.C) {
        pool := stubPool{
                quota: 2,
                unalloc: map[arvados.InstanceType]int{
-                       test.InstanceType(3): 1,
+                       test.InstanceType(3): 2,
                },
                idle: map[arvados.InstanceType]int{
-                       test.InstanceType(3): 1,
+                       test.InstanceType(3): 2,
                },
                running:   map[string]time.Time{},
                creates:   []arvados.InstanceType{},
                starts:    []string{},
-               canCreate: 1,
+               canCreate: 0,
        }
        sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond)
        for i := 0; i < 30; i++ {
@@ -310,7 +325,7 @@ func (*SchedulerSuite) TestEqualPriorityContainers(c *check.C) {
                time.Sleep(time.Millisecond)
        }
        c.Check(pool.shutdowns, check.Equals, 0)
-       c.Check(pool.starts, check.HasLen, 1)
+       c.Check(pool.starts, check.HasLen, 2)
        unlocked := map[string]int{}
        for _, chg := range queue.StateChanges() {
                if chg.To == arvados.ContainerStateQueued {