18947: Update boot recipe for keep-balance.
diff --git a/lib/dispatchcloud/scheduler/run_queue_test.go b/lib/dispatchcloud/scheduler/run_queue_test.go
index fd1d0a870b7ac9f34f9d1dd39f250fed62b4a099..5b5fa960a1f5e167b0175ffcabff3873ac8419a1 100644
--- a/lib/dispatchcloud/scheduler/run_queue_test.go
+++ b/lib/dispatchcloud/scheduler/run_queue_test.go
@@ -244,15 +244,81 @@ func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
                        starts:    []string{},
                        canCreate: 0,
                }
-               New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond).runQueue()
+               sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+               sch.runQueue()
+               sch.sync()
+               sch.runQueue()
+               sch.sync()
                c.Check(pool.creates, check.DeepEquals, shouldCreate)
                if len(shouldCreate) == 0 {
                        c.Check(pool.starts, check.DeepEquals, []string{})
-                       c.Check(pool.shutdowns, check.Not(check.Equals), 0)
                } else {
                        c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
-                       c.Check(pool.shutdowns, check.Equals, 0)
                }
+               c.Check(pool.shutdowns, check.Equals, 3-quota)
+               c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
+                       {UUID: "zzzzz-dz642-000000000000003", From: "Locked", To: "Queued"},
+                       {UUID: "zzzzz-dz642-000000000000002", From: "Locked", To: "Queued"},
+               })
+       }
+}
+
+// Don't flap lock/unlock when equal-priority containers compete for
+// limited workers.
+//
+// (Unless we use FirstSeenAt as a secondary sort key, each runQueue()
+// tends to choose a different one of the equal-priority containers as
+// the "first" one that should be locked, and unlock the one it chose
+// last time. This generates logging noise, and causes containers to
+// fail by quickly reaching MaxDispatchAttempts.)
+func (*SchedulerSuite) TestEqualPriorityContainers(c *check.C) {
+       logger := ctxlog.TestLogger(c)
+       ctx := ctxlog.Context(context.Background(), logger)
+       queue := test.Queue{
+               ChooseType: chooseType,
+               Logger:     logger,
+       }
+       for i := 0; i < 8; i++ {
+               queue.Containers = append(queue.Containers, arvados.Container{
+                       UUID:     test.ContainerUUID(i),
+                       Priority: 333,
+                       State:    arvados.ContainerStateQueued,
+                       RuntimeConstraints: arvados.RuntimeConstraints{
+                               VCPUs: 3,
+                               RAM:   3 << 30,
+                       },
+               })
+       }
+       queue.Update()
+       pool := stubPool{
+               quota: 2,
+               unalloc: map[arvados.InstanceType]int{
+                       test.InstanceType(3): 1,
+               },
+               idle: map[arvados.InstanceType]int{
+                       test.InstanceType(3): 1,
+               },
+               running:   map[string]time.Time{},
+               creates:   []arvados.InstanceType{},
+               starts:    []string{},
+               canCreate: 1,
+       }
+       sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+       for i := 0; i < 30; i++ {
+               sch.runQueue()
+               sch.sync()
+               time.Sleep(time.Millisecond)
+       }
+       c.Check(pool.shutdowns, check.Equals, 0)
+       c.Check(pool.starts, check.HasLen, 1)
+       unlocked := map[string]int{}
+       for _, chg := range queue.StateChanges() {
+               if chg.To == arvados.ContainerStateQueued {
+                       unlocked[chg.UUID]++
+               }
+       }
+       for uuid, count := range unlocked {
+               c.Check(count, check.Equals, 1, check.Commentf("%s", uuid))
        }
 }
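
The comment introducing TestEqualPriorityContainers describes the fix this
test exercises: when priorities tie, the run queue needs a stable secondary
sort key (FirstSeenAt) so every runQueue() pass agrees on which container is
"first" instead of flapping lock/unlock between equals. Below is a minimal
sketch of that ordering; the entry struct and main harness are illustrative
stand-ins, not the scheduler's real types.

package main

import (
        "fmt"
        "sort"
        "time"
)

// entry is an illustrative stand-in for the scheduler's queue entry;
// only Priority and FirstSeenAt matter for the ordering shown here.
type entry struct {
        UUID        string
        Priority    int64
        FirstSeenAt time.Time
}

func main() {
        t0 := time.Now()
        queue := []entry{
                {"zzzzz-dz642-000000000000002", 333, t0.Add(2 * time.Second)},
                {"zzzzz-dz642-000000000000000", 333, t0},
                {"zzzzz-dz642-000000000000001", 333, t0.Add(time.Second)},
        }
        // Highest priority first; FirstSeenAt breaks ties, so repeated
        // scheduling passes always pick the same "first" container and
        // never unlock one they locked on the previous pass.
        sort.SliceStable(queue, func(i, j int) bool {
                if queue[i].Priority != queue[j].Priority {
                        return queue[i].Priority > queue[j].Priority
                }
                return queue[i].FirstSeenAt.Before(queue[j].FirstSeenAt)
        })
        for _, c := range queue {
                fmt.Println(c.UUID) // ...000000, ...000001, ...000002
        }
}

The test drives 30 runQueue()/sync() passes against a pool that can run only
one of the eight equal-priority containers, then checks that any container
returned to Queued was unlocked exactly once; without a deterministic
tiebreaker, successive passes could disagree about which container to keep
locked.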