17417: Merge branch 'main' into 17417-add-arm64
diff --git a/lib/dispatchcloud/scheduler/run_queue_test.go b/lib/dispatchcloud/scheduler/run_queue_test.go
index 090a534355e27ed4b0ef117979bb35bc9e272eb4..5b5fa960a1f5e167b0175ffcabff3873ac8419a1 100644
@@ -5,19 +5,21 @@
 package scheduler
 
 import (
+       "context"
        "sync"
        "time"
 
-       "git.curoverse.com/arvados.git/lib/dispatchcloud/test"
-       "git.curoverse.com/arvados.git/lib/dispatchcloud/worker"
-       "git.curoverse.com/arvados.git/sdk/go/arvados"
-       "github.com/sirupsen/logrus"
+       "git.arvados.org/arvados.git/lib/dispatchcloud/test"
+       "git.arvados.org/arvados.git/lib/dispatchcloud/worker"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+
+       "github.com/prometheus/client_golang/prometheus/testutil"
+
        check "gopkg.in/check.v1"
 )
 
 var (
-       logger = logrus.StandardLogger()
-
        // arbitrary example container UUIDs
        uuids = func() (r []string) {
                for i := 0; i < 16; i++ {
@@ -37,8 +39,9 @@ type stubPool struct {
        notify    <-chan struct{}
        unalloc   map[arvados.InstanceType]int // idle+booting+unknown
        idle      map[arvados.InstanceType]int
+       unknown   map[arvados.InstanceType]int
        running   map[string]time.Time
-       atQuota   bool
+       quota     int
        canCreate int
        creates   []arvados.InstanceType
        starts    []string
@@ -46,7 +49,11 @@ type stubPool struct {
        sync.Mutex
 }
 
-func (p *stubPool) AtQuota() bool               { return p.atQuota }
+func (p *stubPool) AtQuota() bool {
+       p.Lock()
+       defer p.Unlock()
+       return len(p.unalloc)+len(p.running)+len(p.unknown) >= p.quota
+}
 func (p *stubPool) Subscribe() <-chan struct{}  { return p.notify }
 func (p *stubPool) Unsubscribe(<-chan struct{}) {}
 func (p *stubPool) Running() map[string]time.Time {
@@ -63,7 +70,7 @@ func (p *stubPool) Unallocated() map[arvados.InstanceType]int {
        defer p.Unlock()
        r := map[arvados.InstanceType]int{}
        for it, n := range p.unalloc {
-               r[it] = n
+               r[it] = n - p.unknown[it]
        }
        return r
 }
@@ -78,10 +85,14 @@ func (p *stubPool) Create(it arvados.InstanceType) bool {
        p.unalloc[it]++
        return true
 }
-func (p *stubPool) KillContainer(uuid string) {
+func (p *stubPool) ForgetContainer(uuid string) {
+}
+func (p *stubPool) KillContainer(uuid, reason string) bool {
        p.Lock()
        defer p.Unlock()
-       delete(p.running, uuid)
+       defer delete(p.running, uuid)
+       t, ok := p.running[uuid]
+       return ok && t.IsZero()
 }
 func (p *stubPool) Shutdown(arvados.InstanceType) bool {
        p.shutdowns++
@@ -94,6 +105,7 @@ func (p *stubPool) CountWorkers() map[worker.State]int {
                worker.StateBooting: len(p.unalloc) - len(p.idle),
                worker.StateIdle:    len(p.idle),
                worker.StateRunning: len(p.running),
+               worker.StateUnknown: len(p.unknown),
        }
 }
 func (p *stubPool) StartContainer(it arvados.InstanceType, ctr arvados.Container) bool {
@@ -117,12 +129,10 @@ var _ = check.Suite(&SchedulerSuite{})
 
 type SchedulerSuite struct{}
 
-// Assign priority=4 container to idle node. Create a new instance for
-// the priority=3 container. Don't try to start any priority<3
-// containers because priority=3 container didn't start
-// immediately. Don't try to create any other nodes after the failed
-// create.
+// Assign priority=4 container to idle node. Create new instances for
+// the priority=3, 2, 1 containers.
 func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
+       ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
        queue := test.Queue{
                ChooseType: chooseType,
                Containers: []arvados.Container{
@@ -166,6 +176,7 @@ func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
        }
        queue.Update()
        pool := stubPool{
+               quota: 1000,
                unalloc: map[arvados.InstanceType]int{
                        test.InstanceType(1): 1,
                        test.InstanceType(2): 2,
@@ -177,8 +188,8 @@ func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
                running:   map[string]time.Time{},
                canCreate: 0,
        }
-       New(logger, &queue, &pool, time.Millisecond, time.Millisecond).runQueue()
-       c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(1)})
+       New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond).runQueue()
+       c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(1), test.InstanceType(1), test.InstanceType(1)})
        c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})
        c.Check(pool.running, check.HasLen, 1)
        for uuid := range pool.running {
@@ -186,13 +197,14 @@ func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
        }
 }
 
-// If Create() fails, shutdown some nodes, and don't call Create()
-// again.  Don't call Create() at all if AtQuota() is true.
+// If pool.AtQuota() is true, shut down some unalloc nodes, and don't
+// call Create().
 func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
-       for quota := 0; quota < 2; quota++ {
+       ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+       for quota := 1; quota < 3; quota++ {
                c.Logf("quota=%d", quota)
                shouldCreate := []arvados.InstanceType{}
-               for i := 0; i < quota; i++ {
+               for i := 1; i < quota; i++ {
                        shouldCreate = append(shouldCreate, test.InstanceType(3))
                }
                queue := test.Queue{
@@ -220,7 +232,7 @@ func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
                }
                queue.Update()
                pool := stubPool{
-                       atQuota: quota == 0,
+                       quota: quota,
                        unalloc: map[arvados.InstanceType]int{
                                test.InstanceType(2): 2,
                        },
@@ -232,17 +244,90 @@ func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
                        starts:    []string{},
                        canCreate: 0,
                }
-               New(logger, &queue, &pool, time.Millisecond, time.Millisecond).runQueue()
+               sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+               sch.runQueue()
+               sch.sync()
+               sch.runQueue()
+               sch.sync()
                c.Check(pool.creates, check.DeepEquals, shouldCreate)
-               c.Check(pool.starts, check.DeepEquals, []string{})
-               c.Check(pool.shutdowns, check.Not(check.Equals), 0)
+               if len(shouldCreate) == 0 {
+                       c.Check(pool.starts, check.DeepEquals, []string{})
+               } else {
+                       c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
+               }
+               c.Check(pool.shutdowns, check.Equals, 3-quota)
+               c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
+                       {UUID: "zzzzz-dz642-000000000000003", From: "Locked", To: "Queued"},
+                       {UUID: "zzzzz-dz642-000000000000002", From: "Locked", To: "Queued"},
+               })
+       }
+}
+
+// Don't flap lock/unlock when equal-priority containers compete for
+// limited workers.
+//
+// (Unless we use FirstSeenAt as a secondary sort key, each runQueue()
+// tends to choose a different one of the equal-priority containers as
+// the "first" one that should be locked, and unlock the one it chose
+// last time. This generates logging noise, and fails containers by
+// reaching MaxDispatchAttempts quickly.)
+func (*SchedulerSuite) TestEqualPriorityContainers(c *check.C) {
+       logger := ctxlog.TestLogger(c)
+       ctx := ctxlog.Context(context.Background(), logger)
+       queue := test.Queue{
+               ChooseType: chooseType,
+               Logger:     logger,
+       }
+       for i := 0; i < 8; i++ {
+               queue.Containers = append(queue.Containers, arvados.Container{
+                       UUID:     test.ContainerUUID(i),
+                       Priority: 333,
+                       State:    arvados.ContainerStateQueued,
+                       RuntimeConstraints: arvados.RuntimeConstraints{
+                               VCPUs: 3,
+                               RAM:   3 << 30,
+                       },
+               })
+       }
+       queue.Update()
+       pool := stubPool{
+               quota: 2,
+               unalloc: map[arvados.InstanceType]int{
+                       test.InstanceType(3): 1,
+               },
+               idle: map[arvados.InstanceType]int{
+                       test.InstanceType(3): 1,
+               },
+               running:   map[string]time.Time{},
+               creates:   []arvados.InstanceType{},
+               starts:    []string{},
+               canCreate: 1,
+       }
+       sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+       for i := 0; i < 30; i++ {
+               sch.runQueue()
+               sch.sync()
+               time.Sleep(time.Millisecond)
+       }
+       c.Check(pool.shutdowns, check.Equals, 0)
+       c.Check(pool.starts, check.HasLen, 1)
+       unlocked := map[string]int{}
+       for _, chg := range queue.StateChanges() {
+               if chg.To == arvados.ContainerStateQueued {
+                       unlocked[chg.UUID]++
+               }
+       }
+       for uuid, count := range unlocked {
+               c.Check(count, check.Equals, 1, check.Commentf("%s", uuid))
        }
 }
 
 // Start lower-priority containers while waiting for new/existing
 // workers to come up for higher-priority containers.
 func (*SchedulerSuite) TestStartWhileCreating(c *check.C) {
+       ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
        pool := stubPool{
+               quota: 1000,
                unalloc: map[arvados.InstanceType]int{
                        test.InstanceType(1): 2,
                        test.InstanceType(2): 2,
@@ -320,7 +405,7 @@ func (*SchedulerSuite) TestStartWhileCreating(c *check.C) {
                },
        }
        queue.Update()
-       New(logger, &queue, &pool, time.Millisecond, time.Millisecond).runQueue()
+       New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond).runQueue()
        c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(2), test.InstanceType(1)})
        c.Check(pool.starts, check.DeepEquals, []string{uuids[6], uuids[5], uuids[3], uuids[2]})
        running := map[string]bool{}
@@ -333,3 +418,118 @@ func (*SchedulerSuite) TestStartWhileCreating(c *check.C) {
        }
        c.Check(running, check.DeepEquals, map[string]bool{uuids[3]: false, uuids[6]: false})
 }
+
+func (*SchedulerSuite) TestKillNonexistentContainer(c *check.C) {
+       ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+       pool := stubPool{
+               quota: 1000,
+               unalloc: map[arvados.InstanceType]int{
+                       test.InstanceType(2): 0,
+               },
+               idle: map[arvados.InstanceType]int{
+                       test.InstanceType(2): 0,
+               },
+               running: map[string]time.Time{
+                       test.ContainerUUID(2): {},
+               },
+       }
+       queue := test.Queue{
+               ChooseType: chooseType,
+               Containers: []arvados.Container{
+                       {
+                               // create a new worker
+                               UUID:     test.ContainerUUID(1),
+                               Priority: 1,
+                               State:    arvados.ContainerStateLocked,
+                               RuntimeConstraints: arvados.RuntimeConstraints{
+                                       VCPUs: 1,
+                                       RAM:   1 << 30,
+                               },
+                       },
+               },
+       }
+       queue.Update()
+       sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+       c.Check(pool.running, check.HasLen, 1)
+       sch.sync()
+       for deadline := time.Now().Add(time.Second); len(pool.Running()) > 0 && time.Now().Before(deadline); time.Sleep(time.Millisecond) {
+       }
+       c.Check(pool.Running(), check.HasLen, 0)
+}
+
+func (*SchedulerSuite) TestContainersMetrics(c *check.C) {
+       ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+       queue := test.Queue{
+               ChooseType: chooseType,
+               Containers: []arvados.Container{
+                       {
+                               UUID:      test.ContainerUUID(1),
+                               Priority:  1,
+                               State:     arvados.ContainerStateLocked,
+                               CreatedAt: time.Now().Add(-10 * time.Second),
+                               RuntimeConstraints: arvados.RuntimeConstraints{
+                                       VCPUs: 1,
+                                       RAM:   1 << 30,
+                               },
+                       },
+               },
+       }
+       queue.Update()
+
+       // Create a pool with one unallocated (idle/booting/unknown) worker,
+       // and with `idle` and `unknown` left empty. In other words, this worker
+       // is in the booting state, and the container will be allocated but not
+       // started yet.
+       pool := stubPool{
+               unalloc: map[arvados.InstanceType]int{test.InstanceType(1): 1},
+       }
+       sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+       sch.runQueue()
+       sch.updateMetrics()
+
+       c.Check(int(testutil.ToFloat64(sch.mContainersAllocatedNotStarted)), check.Equals, 1)
+       c.Check(int(testutil.ToFloat64(sch.mContainersNotAllocatedOverQuota)), check.Equals, 0)
+       c.Check(int(testutil.ToFloat64(sch.mLongestWaitTimeSinceQueue)), check.Equals, 10)
+
+       // Create a pool without workers. The queued container will not be started, and the
+       // 'over quota' metric will be 1 because no workers are available and canCreate defaults
+       // to zero.
+       pool = stubPool{}
+       sch = New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+       sch.runQueue()
+       sch.updateMetrics()
+
+       c.Check(int(testutil.ToFloat64(sch.mContainersAllocatedNotStarted)), check.Equals, 0)
+       c.Check(int(testutil.ToFloat64(sch.mContainersNotAllocatedOverQuota)), check.Equals, 1)
+       c.Check(int(testutil.ToFloat64(sch.mLongestWaitTimeSinceQueue)), check.Equals, 10)
+
+       // Reset the queue, and create a pool with an idle worker. The queued
+       // container will be started immediately and mLongestWaitTimeSinceQueue
+       // should be zero.
+       queue = test.Queue{
+               ChooseType: chooseType,
+               Containers: []arvados.Container{
+                       {
+                               UUID:      test.ContainerUUID(1),
+                               Priority:  1,
+                               State:     arvados.ContainerStateLocked,
+                               CreatedAt: time.Now().Add(-10 * time.Second),
+                               RuntimeConstraints: arvados.RuntimeConstraints{
+                                       VCPUs: 1,
+                                       RAM:   1 << 30,
+                               },
+                       },
+               },
+       }
+       queue.Update()
+
+       pool = stubPool{
+               idle:    map[arvados.InstanceType]int{test.InstanceType(1): 1},
+               unalloc: map[arvados.InstanceType]int{test.InstanceType(1): 1},
+               running: map[string]time.Time{},
+       }
+       sch = New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+       sch.runQueue()
+       sch.updateMetrics()
+
+       c.Check(int(testutil.ToFloat64(sch.mLongestWaitTimeSinceQueue)), check.Equals, 0)
+}