// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package scheduler

import (
	"context"
	"sync"
	"time"

	"git.arvados.org/arvados.git/lib/dispatchcloud/test"
	"git.arvados.org/arvados.git/lib/dispatchcloud/worker"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/ctxlog"

	"github.com/prometheus/client_golang/prometheus/testutil"

	check "gopkg.in/check.v1"
)
var (
	// arbitrary example container UUIDs
	uuids = func() (r []string) {
		for i := 0; i < 16; i++ {
			r = append(r, test.ContainerUUID(i))
		}
		return
	}()
)
type stubPool struct {
	notify    <-chan struct{}
	unalloc   map[arvados.InstanceType]int // idle+booting+unknown
	busy      map[arvados.InstanceType]int
	idle      map[arvados.InstanceType]int
	unknown   map[arvados.InstanceType]int
	running   map[string]time.Time
	quota     int
	capacity  map[string]int
	canCreate int
	creates   []arvados.InstanceType
	starts    []string
	shutdowns int
	sync.Mutex
}
func (p *stubPool) AtQuota() bool {
	p.Lock()
	defer p.Unlock()
	n := len(p.running)
	for _, nn := range p.unalloc {
		n += nn
	}
	for _, nn := range p.unknown {
		n += nn
	}
	return n >= p.quota
}
func (p *stubPool) AtCapacity(it arvados.InstanceType) bool {
	supply, ok := p.capacity[it.ProviderType]
	if !ok {
		// no capacity limit configured for this provider type
		return false
	}
	for _, existing := range []map[arvados.InstanceType]int{p.unalloc, p.busy} {
		for eit, n := range existing {
			if eit.ProviderType == it.ProviderType {
				supply -= n
			}
		}
	}
	return supply < 1
}
func (p *stubPool) Subscribe() <-chan struct{}  { return p.notify }
func (p *stubPool) Unsubscribe(<-chan struct{}) {}
func (p *stubPool) Running() map[string]time.Time {
	p.Lock()
	defer p.Unlock()
	r := map[string]time.Time{}
	for k, v := range p.running {
		r[k] = v
	}
	return r
}

func (p *stubPool) Unallocated() map[arvados.InstanceType]int {
	p.Lock()
	defer p.Unlock()
	r := map[arvados.InstanceType]int{}
	for it, n := range p.unalloc {
		// workers in "unknown" state don't count as allocatable
		r[it] = n - p.unknown[it]
	}
	return r
}
func (p *stubPool) Create(it arvados.InstanceType) bool {
	p.Lock()
	defer p.Unlock()
	p.creates = append(p.creates, it)
	if p.canCreate < 1 {
		// record the attempt, but refuse to create
		return false
	}
	p.canCreate--
	p.unalloc[it]++
	return true
}

func (p *stubPool) ForgetContainer(uuid string) {
}
func (p *stubPool) KillContainer(uuid, reason string) bool {
	p.Lock()
	defer p.Unlock()
	defer delete(p.running, uuid)
	// Report success only if the container was in the running map
	// with a zero timestamp, i.e., it never really started.
	t, ok := p.running[uuid]
	return ok && t.IsZero()
}
func (p *stubPool) Shutdown(arvados.InstanceType) bool {
	p.shutdowns++
	return false
}
func (p *stubPool) CountWorkers() map[worker.State]int {
	p.Lock()
	defer p.Unlock()
	return map[worker.State]int{
		worker.StateBooting: len(p.unalloc) - len(p.idle),
		worker.StateIdle:    len(p.idle),
		worker.StateRunning: len(p.running),
		worker.StateUnknown: len(p.unknown),
	}
}
func (p *stubPool) StartContainer(it arvados.InstanceType, ctr arvados.Container) bool {
	p.Lock()
	defer p.Unlock()
	// record every start attempt, even ones that fail below
	p.starts = append(p.starts, ctr.UUID)
	if p.idle[it] == 0 {
		return false
	}
	p.busy[it]++
	p.idle[it]--
	p.unalloc[it]--
	p.running[ctr.UUID] = time.Time{}
	return true
}
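
// Compile-time sketch, added as documentation: stubPool is passed to
// New() in the tests below, so it must implement the scheduler's
// worker pool interface. Assuming that interface is this package's
// WorkerPool (the name is an assumption, not established by the tests
// themselves), this assertion turns any drift in the stub's method set
// into a build error:
var _ WorkerPool = (*stubPool)(nil)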
func chooseType(ctr *arvados.Container) ([]arvados.InstanceType, error) {
	return []arvados.InstanceType{test.InstanceType(ctr.RuntimeConstraints.VCPUs)}, nil
}
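
// Reading aid (not an added test): chooseType maps a container's VCPUs
// constraint directly to the test instance type with the same number,
// e.g.
//
//	its, _ := chooseType(&arvados.Container{
//		RuntimeConstraints: arvados.RuntimeConstraints{VCPUs: 3},
//	})
//	// len(its) == 1 && its[0] == test.InstanceType(3)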
var _ = check.Suite(&SchedulerSuite{})

type SchedulerSuite struct{}
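
// Note: gopkg.in/check.v1 suites run via the standard go-test bridge,
// registered once per package:
//
//	func Test(t *testing.T) { check.TestingT(t) }
//
// That hook is assumed to live in a sibling _test.go file of this
// package rather than here.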
// Assign priority=4 container to idle node. Create new instances for
// the priority=3, 2, 1 containers.
func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
	queue := test.Queue{
		ChooseType: chooseType,
		Containers: []arvados.Container{
			{
				UUID:     test.ContainerUUID(1),
				Priority: 1,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 1,
					RAM:   1 << 30,
				},
			},
			{
				UUID:     test.ContainerUUID(2),
				Priority: 2,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 1,
					RAM:   1 << 30,
				},
			},
			{
				UUID:     test.ContainerUUID(3),
				Priority: 3,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 1,
					RAM:   1 << 30,
				},
			},
			{
				UUID:     test.ContainerUUID(4),
				Priority: 4,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 1,
					RAM:   1 << 30,
				},
			},
		},
	}
	queue.Update()
	pool := stubPool{
		quota: 1000,
		unalloc: map[arvados.InstanceType]int{
			test.InstanceType(1): 1,
			test.InstanceType(2): 2,
		},
		idle: map[arvados.InstanceType]int{
			test.InstanceType(1): 1,
			test.InstanceType(2): 2,
		},
		busy:      map[arvados.InstanceType]int{},
		running:   map[string]time.Time{},
		canCreate: 0,
	}
	New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0).runQueue()
	c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(1), test.InstanceType(1), test.InstanceType(1)})
	c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})
	c.Check(pool.running, check.HasLen, 1)
	for uuid := range pool.running {
		c.Check(uuid, check.Equals, uuids[4])
	}
}
// If pool.AtQuota() is true, shut down some unalloc nodes, and don't
// start any containers.
func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
	for quota := 1; quota <= 3; quota++ {
		c.Logf("quota=%d", quota)
		queue := test.Queue{
			ChooseType: chooseType,
			Containers: []arvados.Container{
				{
					UUID:     test.ContainerUUID(2),
					Priority: 2,
					State:    arvados.ContainerStateLocked,
					RuntimeConstraints: arvados.RuntimeConstraints{
						VCPUs: 2,
						RAM:   2 << 30,
					},
				},
				{
					UUID:     test.ContainerUUID(3),
					Priority: 3,
					State:    arvados.ContainerStateLocked,
					RuntimeConstraints: arvados.RuntimeConstraints{
						VCPUs: 3,
						RAM:   3 << 30,
					},
				},
			},
		}
		queue.Update()
		pool := stubPool{
			quota: quota,
			unalloc: map[arvados.InstanceType]int{
				test.InstanceType(2): 2,
			},
			idle: map[arvados.InstanceType]int{
				test.InstanceType(2): 2,
			},
			busy:      map[arvados.InstanceType]int{},
			running:   map[string]time.Time{},
			creates:   []arvados.InstanceType{},
			starts:    []string{},
			canCreate: 1,
		}
		sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
		sch.sync()
		sch.runQueue()
		sch.sync()
		switch quota {
		case 1, 2:
			// Can't create a type3 node for ctr3, so we
			// shut down an unallocated node (type2), and
			// unlock the 2nd-in-line container, but not
			// the 1st-in-line container.
			c.Check(pool.starts, check.HasLen, 0)
			c.Check(pool.shutdowns, check.Equals, 1)
			c.Check(pool.creates, check.HasLen, 0)
			c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
				{UUID: test.ContainerUUID(2), From: "Locked", To: "Queued"},
			})
		case 3:
			// Creating a type3 instance works, so we
			// start ctr2 on a type2 instance, and leave
			// ctr3 locked while we wait for the new
			// instance to come up.
			c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
			c.Check(pool.shutdowns, check.Equals, 0)
			c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(3)})
			c.Check(queue.StateChanges(), check.HasLen, 0)
		default:
			panic("test not written for quota>3")
		}
	}
}
// If pool.AtCapacity(it) is true for one instance type, try running a
// lower-priority container that uses a different node type. Don't
// lock/unlock/start any container that requires the affected instance
// type.
func (*SchedulerSuite) TestInstanceCapacity(c *check.C) {
	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))

	queue := test.Queue{
		ChooseType: chooseType,
		Containers: []arvados.Container{
			{
				UUID:     test.ContainerUUID(1),
				Priority: 1,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 1,
					RAM:   1 << 30,
				},
			},
			{
				UUID:     test.ContainerUUID(2),
				Priority: 2,
				State:    arvados.ContainerStateQueued,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 4,
					RAM:   4 << 30,
				},
			},
			{
				UUID:     test.ContainerUUID(3),
				Priority: 3,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 4,
					RAM:   4 << 30,
				},
			},
			{
				UUID:     test.ContainerUUID(4),
				Priority: 4,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 4,
					RAM:   4 << 30,
				},
			},
		},
	}
	queue.Update()
	pool := stubPool{
		quota:    99,
		capacity: map[string]int{test.InstanceType(4).ProviderType: 1},
		unalloc: map[arvados.InstanceType]int{
			test.InstanceType(4): 1,
		},
		idle: map[arvados.InstanceType]int{
			test.InstanceType(4): 1,
		},
		busy:      map[arvados.InstanceType]int{},
		running:   map[string]time.Time{},
		creates:   []arvados.InstanceType{},
		starts:    []string{},
		canCreate: 99,
	}
	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
	sch.sync()
	sch.runQueue()
	sch.sync()

	// Start container4, but then pool reports AtCapacity for
	// type4, so we skip trying to create an instance for
	// container3, skip locking container2, but do try to create a
	// type1 instance for container1.
	c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})
	c.Check(pool.shutdowns, check.Equals, 0)
	c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(1)})
	c.Check(queue.StateChanges(), check.HasLen, 0)
}
// Don't unlock containers or shut down unalloc (booting/idle) nodes
// just because some 503 errors caused us to reduce maxConcurrency
// below the current load level.
//
// We expect to raise maxConcurrency soon when we stop seeing 503s. If
// that doesn't happen soon, the idle timeout will take care of the
// excess nodes.
func (*SchedulerSuite) TestIdleIn503QuietPeriod(c *check.C) {
	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
	queue := test.Queue{
		ChooseType: chooseType,
		Containers: []arvados.Container{
			// scheduled on an instance (but not Running yet)
			{
				UUID:     test.ContainerUUID(1),
				Priority: 1000,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 2,
					RAM:   2 << 30,
				},
			},
			// not yet scheduled
			{
				UUID:     test.ContainerUUID(2),
				Priority: 1000,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 2,
					RAM:   2 << 30,
				},
			},
			// scheduled on an instance (but not Running yet)
			{
				UUID:     test.ContainerUUID(3),
				Priority: 1000,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 3,
					RAM:   3 << 30,
				},
			},
			// not yet scheduled
			{
				UUID:     test.ContainerUUID(4),
				Priority: 1000,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 3,
					RAM:   3 << 30,
				},
			},
			// not locked
			{
				UUID:     test.ContainerUUID(5),
				Priority: 1000,
				State:    arvados.ContainerStateQueued,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 3,
					RAM:   3 << 30,
				},
			},
		},
	}
	queue.Update()
	pool := stubPool{
		quota: 16,
		unalloc: map[arvados.InstanceType]int{
			test.InstanceType(2): 2,
			test.InstanceType(3): 2,
		},
		idle: map[arvados.InstanceType]int{
			test.InstanceType(2): 1,
			test.InstanceType(3): 1,
		},
		busy: map[arvados.InstanceType]int{
			test.InstanceType(2): 1,
			test.InstanceType(3): 1,
		},
		running: map[string]time.Time{
			test.ContainerUUID(1): {},
			test.ContainerUUID(3): {},
		},
		creates:   []arvados.InstanceType{},
		starts:    []string{},
		canCreate: 0,
	}
	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
	sch.last503time = time.Now()
	sch.maxConcurrency = 3
	sch.sync()
	sch.runQueue()
	sch.sync()

	c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
	c.Check(pool.shutdowns, check.Equals, 0)
	c.Check(pool.creates, check.HasLen, 0)
	c.Check(queue.StateChanges(), check.HasLen, 0)
}
// If we somehow have more supervisor containers in Locked state than
// we should (e.g., config changed since they started), and some
// appropriate-sized instances booting up, unlock the excess
// supervisor containers, but let the instances keep booting.
func (*SchedulerSuite) TestUnlockExcessSupervisors(c *check.C) {
	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
	queue := test.Queue{
		ChooseType: chooseType,
	}
	for i := 1; i <= 6; i++ {
		queue.Containers = append(queue.Containers, arvados.Container{
			UUID:     test.ContainerUUID(i),
			Priority: int64(1000 - i),
			State:    arvados.ContainerStateLocked,
			RuntimeConstraints: arvados.RuntimeConstraints{
				VCPUs: 2,
				RAM:   2 << 30,
			},
			SchedulingParameters: arvados.SchedulingParameters{
				Supervisor: true,
			},
		})
	}
	queue.Update()
	pool := stubPool{
		quota: 16,
		unalloc: map[arvados.InstanceType]int{
			test.InstanceType(2): 2,
		},
		idle: map[arvados.InstanceType]int{
			test.InstanceType(2): 1,
		},
		busy: map[arvados.InstanceType]int{
			test.InstanceType(2): 4,
		},
		running: map[string]time.Time{
			test.ContainerUUID(1): {},
			test.ContainerUUID(2): {},
			test.ContainerUUID(3): {},
			test.ContainerUUID(4): {},
		},
		creates:   []arvados.InstanceType{},
		starts:    []string{},
		canCreate: 0,
	}
	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 8, 0.5)
	sch.sync()
	sch.runQueue()
	sch.sync()

	c.Check(pool.starts, check.DeepEquals, []string{})
	c.Check(pool.shutdowns, check.Equals, 0)
	c.Check(pool.creates, check.HasLen, 0)
	c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
		{UUID: test.ContainerUUID(5), From: "Locked", To: "Queued"},
		{UUID: test.ContainerUUID(6), From: "Locked", To: "Queued"},
	})
}
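
// Hedged arithmetic note (the cap formula is an assumption consistent
// with the expectations above): if the supervisor cap is
// int(maxInstances * supervisorFraction), then New(..., 8, 0.5) allows
// int(8*0.5) = 4 supervisors, so the four running supervisors stay put
// and the two excess locked ones (5 and 6) go back to the queue. The
// same arithmetic gives int(10*0.2) = 2 in TestSkipSupervisors below.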
// Assuming we're not at quota, don't try to shut down idle nodes
// merely because we have more queued/locked supervisor containers
// than MaxSupervisors -- it won't help.
func (*SchedulerSuite) TestExcessSupervisors(c *check.C) {
	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
	queue := test.Queue{
		ChooseType: chooseType,
	}
	for i := 1; i <= 8; i++ {
		queue.Containers = append(queue.Containers, arvados.Container{
			UUID:     test.ContainerUUID(i),
			Priority: int64(1000 + i),
			State:    arvados.ContainerStateQueued,
			RuntimeConstraints: arvados.RuntimeConstraints{
				VCPUs: 2,
				RAM:   2 << 30,
			},
			SchedulingParameters: arvados.SchedulingParameters{
				Supervisor: true,
			},
		})
	}
	for i := 2; i < 4; i++ {
		queue.Containers[i].State = arvados.ContainerStateLocked
	}
	for i := 4; i < 6; i++ {
		queue.Containers[i].State = arvados.ContainerStateRunning
	}
	queue.Update()
	pool := stubPool{
		quota: 16,
		unalloc: map[arvados.InstanceType]int{
			test.InstanceType(2): 2,
		},
		idle: map[arvados.InstanceType]int{
			test.InstanceType(2): 1,
		},
		busy: map[arvados.InstanceType]int{
			test.InstanceType(2): 2,
		},
		running: map[string]time.Time{
			test.ContainerUUID(5): {},
			test.ContainerUUID(6): {},
		},
		creates:   []arvados.InstanceType{},
		starts:    []string{},
		canCreate: 0,
	}
	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 8, 0.5)
	sch.sync()
	sch.runQueue()
	sch.sync()

	c.Check(pool.starts, check.HasLen, 2)
	c.Check(pool.shutdowns, check.Equals, 0)
	c.Check(pool.creates, check.HasLen, 0)
	c.Check(queue.StateChanges(), check.HasLen, 0)
}
// Don't flap lock/unlock when equal-priority containers compete for
// limited workers.
//
// (Unless we use FirstSeenAt as a secondary sort key, each runQueue()
// tends to choose a different one of the equal-priority containers as
// the "first" one that should be locked, and unlock the one it chose
// last time. This generates logging noise, and fails containers by
// reaching MaxDispatchAttempts quickly.)
func (*SchedulerSuite) TestEqualPriorityContainers(c *check.C) {
	logger := ctxlog.TestLogger(c)
	ctx := ctxlog.Context(context.Background(), logger)
	queue := test.Queue{
		ChooseType: chooseType,
		Logger:     logger,
	}
	for i := 0; i < 8; i++ {
		queue.Containers = append(queue.Containers, arvados.Container{
			UUID:     test.ContainerUUID(i),
			Priority: 333,
			State:    arvados.ContainerStateQueued,
			RuntimeConstraints: arvados.RuntimeConstraints{
				VCPUs: 3,
				RAM:   3 << 30,
			},
		})
	}
	queue.Update()
	pool := stubPool{
		quota: 2,
		unalloc: map[arvados.InstanceType]int{
			test.InstanceType(3): 2,
		},
		idle: map[arvados.InstanceType]int{
			test.InstanceType(3): 2,
		},
		busy:      map[arvados.InstanceType]int{},
		running:   map[string]time.Time{},
		creates:   []arvados.InstanceType{},
		starts:    []string{},
		canCreate: 0,
	}
	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
	for i := 0; i < 30; i++ {
		sch.runQueue()
		sch.sync()
		time.Sleep(time.Millisecond)
	}
	c.Check(pool.shutdowns, check.Equals, 0)
	c.Check(pool.starts, check.HasLen, 2)
	unlocked := map[string]int{}
	for _, chg := range queue.StateChanges() {
		if chg.To == arvados.ContainerStateQueued {
			unlocked[chg.UUID]++
		}
	}
	for uuid, count := range unlocked {
		c.Check(count, check.Equals, 1, check.Commentf("%s", uuid))
	}
}
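
// A minimal hedged sketch (not scheduler API) of the tie-break rule the
// comment above refers to: order by descending priority, then by
// ascending first-seen time, so every runQueue() pass agrees on which
// equal-priority container is "first". The parameters stand in for the
// real queue entry's fields.
func queuedBefore(aPriority, bPriority int64, aFirstSeen, bFirstSeen time.Time) bool {
	if aPriority != bPriority {
		// higher priority sorts first
		return aPriority > bPriority
	}
	// equal priority: the container seen earlier sorts first, making
	// the ordering stable across scheduling passes
	return aFirstSeen.Before(bFirstSeen)
}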
// Start lower-priority containers while waiting for new/existing
// workers to come up for higher-priority containers.
func (*SchedulerSuite) TestStartWhileCreating(c *check.C) {
	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
	pool := stubPool{
		quota: 1000,
		unalloc: map[arvados.InstanceType]int{
			test.InstanceType(1): 2,
			test.InstanceType(2): 2,
		},
		idle: map[arvados.InstanceType]int{
			test.InstanceType(1): 1,
			test.InstanceType(2): 1,
		},
		busy:      map[arvados.InstanceType]int{},
		running:   map[string]time.Time{},
		canCreate: 4,
	}
	queue := test.Queue{
		ChooseType: chooseType,
		Containers: []arvados.Container{
			{
				// create a new worker
				UUID:     test.ContainerUUID(1),
				Priority: 1,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 1,
					RAM:   1 << 30,
				},
			},
			{
				// tentatively map to unalloc worker
				UUID:     test.ContainerUUID(2),
				Priority: 2,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 1,
					RAM:   1 << 30,
				},
			},
			{
				// start now on idle worker
				UUID:     test.ContainerUUID(3),
				Priority: 3,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 1,
					RAM:   1 << 30,
				},
			},
			{
				// create a new worker
				UUID:     test.ContainerUUID(4),
				Priority: 4,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 2,
					RAM:   2 << 30,
				},
			},
			{
				// tentatively map to unalloc worker
				UUID:     test.ContainerUUID(5),
				Priority: 5,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 2,
					RAM:   2 << 30,
				},
			},
			{
				// start now on idle worker
				UUID:     test.ContainerUUID(6),
				Priority: 6,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 2,
					RAM:   2 << 30,
				},
			},
		},
	}
	queue.Update()
	New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0).runQueue()
	c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(2), test.InstanceType(1)})
	c.Check(pool.starts, check.DeepEquals, []string{uuids[6], uuids[5], uuids[3], uuids[2]})
	running := map[string]bool{}
	for uuid, t := range pool.running {
		if t.IsZero() {
			running[uuid] = false
		} else {
			running[uuid] = true
		}
	}
	c.Check(running, check.DeepEquals, map[string]bool{uuids[3]: false, uuids[6]: false})
}
func (*SchedulerSuite) TestKillNonexistentContainer(c *check.C) {
	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
	pool := stubPool{
		quota: 1000,
		unalloc: map[arvados.InstanceType]int{
			test.InstanceType(2): 0,
		},
		idle: map[arvados.InstanceType]int{
			test.InstanceType(2): 0,
		},
		busy: map[arvados.InstanceType]int{
			test.InstanceType(2): 1,
		},
		running: map[string]time.Time{
			test.ContainerUUID(2): {},
		},
	}
	queue := test.Queue{
		ChooseType: chooseType,
		Containers: []arvados.Container{
			{
				// create a new worker
				UUID:     test.ContainerUUID(1),
				Priority: 1,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 1,
					RAM:   1 << 30,
				},
			},
		},
	}
	queue.Update()
	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
	c.Check(pool.running, check.HasLen, 1)
	sch.sync()
	for deadline := time.Now().Add(time.Second); len(pool.Running()) > 0 && time.Now().Before(deadline); time.Sleep(time.Millisecond) {
	}
	c.Check(pool.Running(), check.HasLen, 0)
}
func (*SchedulerSuite) TestContainersMetrics(c *check.C) {
	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
	queue := test.Queue{
		ChooseType: chooseType,
		Containers: []arvados.Container{
			{
				UUID:      test.ContainerUUID(1),
				Priority:  1,
				State:     arvados.ContainerStateLocked,
				CreatedAt: time.Now().Add(-10 * time.Second),
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 1,
					RAM:   1 << 30,
				},
			},
		},
	}
	queue.Update()

	// Create a pool with one unallocated (idle/booting/unknown)
	// worker, with `idle` and `unknown` left unset (empty). In other
	// words, this worker is booting, so the container will be
	// allocated but not started yet.
	pool := stubPool{
		unalloc: map[arvados.InstanceType]int{test.InstanceType(1): 1},
	}
	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
	sch.runQueue()
	sch.updateMetrics()

	c.Check(int(testutil.ToFloat64(sch.mContainersAllocatedNotStarted)), check.Equals, 1)
	c.Check(int(testutil.ToFloat64(sch.mContainersNotAllocatedOverQuota)), check.Equals, 0)
	c.Check(int(testutil.ToFloat64(sch.mLongestWaitTimeSinceQueue)), check.Equals, 10)

	// Create a pool without workers. The queued container will not
	// be started, and the 'over quota' metric will be 1 because no
	// workers are available and canCreate defaults to zero.
	pool = stubPool{}
	sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
	sch.runQueue()
	sch.updateMetrics()

	c.Check(int(testutil.ToFloat64(sch.mContainersAllocatedNotStarted)), check.Equals, 0)
	c.Check(int(testutil.ToFloat64(sch.mContainersNotAllocatedOverQuota)), check.Equals, 1)
	c.Check(int(testutil.ToFloat64(sch.mLongestWaitTimeSinceQueue)), check.Equals, 10)

	// Reset the queue, and create a pool with an idle worker. The
	// queued container will be started immediately, and
	// mLongestWaitTimeSinceQueue should be zero.
	queue = test.Queue{
		ChooseType: chooseType,
		Containers: []arvados.Container{
			{
				UUID:      test.ContainerUUID(1),
				Priority:  1,
				State:     arvados.ContainerStateLocked,
				CreatedAt: time.Now().Add(-10 * time.Second),
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 1,
					RAM:   1 << 30,
				},
			},
		},
	}
	queue.Update()

	pool = stubPool{
		idle:    map[arvados.InstanceType]int{test.InstanceType(1): 1},
		unalloc: map[arvados.InstanceType]int{test.InstanceType(1): 1},
		busy:    map[arvados.InstanceType]int{},
		running: map[string]time.Time{},
	}
	sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
	sch.runQueue()
	sch.updateMetrics()

	c.Check(int(testutil.ToFloat64(sch.mLongestWaitTimeSinceQueue)), check.Equals, 0)
}
// Assign priority=4, 3 and 1 containers to idle nodes. Ignore the supervisor at priority 2.
func (*SchedulerSuite) TestSkipSupervisors(c *check.C) {
	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
	queue := test.Queue{
		ChooseType: chooseType,
		Containers: []arvados.Container{
			{
				UUID:     test.ContainerUUID(1),
				Priority: 1,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 1,
					RAM:   1 << 30,
				},
			},
			{
				UUID:     test.ContainerUUID(2),
				Priority: 2,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 1,
					RAM:   1 << 30,
				},
				SchedulingParameters: arvados.SchedulingParameters{
					Supervisor: true,
				},
			},
			{
				UUID:     test.ContainerUUID(3),
				Priority: 3,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 1,
					RAM:   1 << 30,
				},
				SchedulingParameters: arvados.SchedulingParameters{
					Supervisor: true,
				},
			},
			{
				UUID:     test.ContainerUUID(4),
				Priority: 4,
				State:    arvados.ContainerStateLocked,
				RuntimeConstraints: arvados.RuntimeConstraints{
					VCPUs: 1,
					RAM:   1 << 30,
				},
				SchedulingParameters: arvados.SchedulingParameters{
					Supervisor: true,
				},
			},
		},
	}
	queue.Update()
	pool := stubPool{
		quota: 1000,
		unalloc: map[arvados.InstanceType]int{
			test.InstanceType(1): 4,
			test.InstanceType(2): 4,
		},
		idle: map[arvados.InstanceType]int{
			test.InstanceType(1): 4,
			test.InstanceType(2): 4,
		},
		busy:      map[arvados.InstanceType]int{},
		running:   map[string]time.Time{},
		canCreate: 0,
	}
	New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 10, 0.2).runQueue()
	c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType(nil))
	c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4), test.ContainerUUID(3), test.ContainerUUID(1)})
}
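
// Reading note for the creates check above: the expected value is
// []arvados.InstanceType(nil), not []arvados.InstanceType{}, because
// this test's stubPool literal leaves creates uninitialized and no
// Create() call ever appends to it; DeepEquals distinguishes a nil
// slice from an empty one.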