X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/e16542e3cb4fddf05c407cb013c9e1573eb9c289..e5394906b154b630699c0edd4add36eca34611b3:/lib/dispatchcloud/scheduler/run_queue_test.go

diff --git a/lib/dispatchcloud/scheduler/run_queue_test.go b/lib/dispatchcloud/scheduler/run_queue_test.go
index c683b704d4..4359ae03ba 100644
--- a/lib/dispatchcloud/scheduler/run_queue_test.go
+++ b/lib/dispatchcloud/scheduler/run_queue_test.go
@@ -9,10 +9,13 @@ import (
 	"sync"
 	"time"
 
-	"git.curoverse.com/arvados.git/lib/dispatchcloud/test"
-	"git.curoverse.com/arvados.git/lib/dispatchcloud/worker"
-	"git.curoverse.com/arvados.git/sdk/go/arvados"
-	"git.curoverse.com/arvados.git/sdk/go/ctxlog"
+	"git.arvados.org/arvados.git/lib/dispatchcloud/test"
+	"git.arvados.org/arvados.git/lib/dispatchcloud/worker"
+	"git.arvados.org/arvados.git/sdk/go/arvados"
+	"git.arvados.org/arvados.git/sdk/go/ctxlog"
+
+	"github.com/prometheus/client_golang/prometheus/testutil"
+
 	check "gopkg.in/check.v1"
 )
 
@@ -26,18 +29,13 @@ var (
 	}()
 )
 
-type stubQuotaError struct {
-	error
-}
-
-func (stubQuotaError) IsQuotaError() bool { return true }
-
 type stubPool struct {
 	notify    <-chan struct{}
 	unalloc   map[arvados.InstanceType]int // idle+booting+unknown
 	idle      map[arvados.InstanceType]int
+	unknown   map[arvados.InstanceType]int
 	running   map[string]time.Time
-	atQuota   bool
+	quota     int
 	canCreate int
 	creates   []arvados.InstanceType
 	starts    []string
@@ -45,7 +43,18 @@ type stubPool struct {
 	sync.Mutex
 }
 
-func (p *stubPool) AtQuota() bool               { return p.atQuota }
+func (p *stubPool) AtQuota() bool {
+	p.Lock()
+	defer p.Unlock()
+	n := len(p.running)
+	for _, nn := range p.unalloc {
+		n += nn
+	}
+	for _, nn := range p.unknown {
+		n += nn
+	}
+	return n >= p.quota
+}
 func (p *stubPool) Subscribe() <-chan struct{}  { return p.notify }
 func (p *stubPool) Unsubscribe(<-chan struct{}) {}
 func (p *stubPool) Running() map[string]time.Time {
@@ -62,7 +71,7 @@ func (p *stubPool) Unallocated() map[arvados.InstanceType]int {
 	defer p.Unlock()
 	r := map[arvados.InstanceType]int{}
 	for it, n := range p.unalloc {
-		r[it] = n
+		r[it] = n - p.unknown[it]
 	}
 	return r
 }
@@ -82,8 +91,9 @@ func (p *stubPool) ForgetContainer(uuid string) {
 func (p *stubPool) KillContainer(uuid, reason string) bool {
 	p.Lock()
 	defer p.Unlock()
-	delete(p.running, uuid)
-	return true
+	defer delete(p.running, uuid)
+	t, ok := p.running[uuid]
+	return ok && t.IsZero()
 }
 func (p *stubPool) Shutdown(arvados.InstanceType) bool {
 	p.shutdowns++
@@ -96,6 +106,7 @@ func (p *stubPool) CountWorkers() map[worker.State]int {
 		worker.StateBooting: len(p.unalloc) - len(p.idle),
 		worker.StateIdle:    len(p.idle),
 		worker.StateRunning: len(p.running),
+		worker.StateUnknown: len(p.unknown),
 	}
 }
 func (p *stubPool) StartContainer(it arvados.InstanceType, ctr arvados.Container) bool {
@@ -119,11 +130,8 @@ var _ = check.Suite(&SchedulerSuite{})
 
 type SchedulerSuite struct{}
 
-// Assign priority=4 container to idle node. Create a new instance for
-// the priority=3 container. Don't try to start any priority<3
-// containers because priority=3 container didn't start
-// immediately. Don't try to create any other nodes after the failed
-// create.
+// Assign priority=4 container to idle node. Create new instances for
+// the priority=3, 2, 1 containers.
 func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
 	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
 	queue := test.Queue{
@@ -169,6 +177,7 @@ func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
 	}
 	queue.Update()
 	pool := stubPool{
+		quota: 1000,
 		unalloc: map[arvados.InstanceType]int{
 			test.InstanceType(1): 1,
 			test.InstanceType(2): 2,
@@ -180,8 +189,8 @@ func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
 		running:   map[string]time.Time{},
 		canCreate: 0,
 	}
-	New(ctx, &queue, &pool, time.Millisecond, time.Millisecond).runQueue()
-	c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(1)})
+	New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0).runQueue()
+	c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(1), test.InstanceType(1), test.InstanceType(1)})
 	c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})
 	c.Check(pool.running, check.HasLen, 1)
 	for uuid := range pool.running {
@@ -189,16 +198,12 @@ func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
 	}
 }
 
-// If Create() fails, shutdown some nodes, and don't call Create()
-// again. Don't call Create() at all if AtQuota() is true.
+// If pool.AtQuota() is true, shutdown some unalloc nodes, and don't
+// call Create().
 func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
 	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
-	for quota := 0; quota < 2; quota++ {
+	for quota := 1; quota <= 3; quota++ {
 		c.Logf("quota=%d", quota)
-		shouldCreate := []arvados.InstanceType{}
-		for i := 0; i < quota; i++ {
-			shouldCreate = append(shouldCreate, test.InstanceType(3))
-		}
 		queue := test.Queue{
 			ChooseType: chooseType,
 			Containers: []arvados.Container{
@@ -224,7 +229,7 @@ func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
 		}
 		queue.Update()
 		pool := stubPool{
-			atQuota: quota == 0,
+			quota: quota,
 			unalloc: map[arvados.InstanceType]int{
 				test.InstanceType(2): 2,
 			},
@@ -236,10 +241,301 @@ func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
 			starts:    []string{},
 			canCreate: 0,
 		}
-		New(ctx, &queue, &pool, time.Millisecond, time.Millisecond).runQueue()
-		c.Check(pool.creates, check.DeepEquals, shouldCreate)
-		c.Check(pool.starts, check.DeepEquals, []string{})
-		c.Check(pool.shutdowns, check.Not(check.Equals), 0)
+		sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
+		sch.sync()
+		sch.runQueue()
+		sch.sync()
+		switch quota {
+		case 1, 2:
+			// Can't create a type3 node for ctr3, so we
+			// shutdown an unallocated node (type2), and
+			// unlock both containers.
+			c.Check(pool.starts, check.HasLen, 0)
+			c.Check(pool.shutdowns, check.Equals, 1)
+			c.Check(pool.creates, check.HasLen, 0)
+			c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
+				{UUID: test.ContainerUUID(3), From: "Locked", To: "Queued"},
+				{UUID: test.ContainerUUID(2), From: "Locked", To: "Queued"},
+			})
+		case 3:
+			// Creating a type3 instance works, so we
+			// start ctr2 on a type2 instance, and leave
+			// ctr3 locked while we wait for the new
+			// instance to come up.
+			c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
+			c.Check(pool.shutdowns, check.Equals, 0)
+			c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(3)})
+			c.Check(queue.StateChanges(), check.HasLen, 0)
+		default:
+			panic("test not written for quota>3")
+		}
+	}
+}
+
+// Don't unlock containers or shutdown unalloc (booting/idle) nodes
+// just because some 503 errors caused us to reduce maxConcurrency
+// below the current load level.
+//
+// We expect to raise maxConcurrency soon when we stop seeing 503s. If
+// that doesn't happen soon, the idle timeout will take care of the
+// excess nodes.
+func (*SchedulerSuite) TestIdleIn503QuietPeriod(c *check.C) {
+	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+	queue := test.Queue{
+		ChooseType: chooseType,
+		Containers: []arvados.Container{
+			// scheduled on an instance (but not Running yet)
+			{
+				UUID:     test.ContainerUUID(1),
+				Priority: 1000,
+				State:    arvados.ContainerStateLocked,
+				RuntimeConstraints: arvados.RuntimeConstraints{
+					VCPUs: 2,
+					RAM:   2 << 30,
+				},
+			},
+			// not yet scheduled
+			{
+				UUID:     test.ContainerUUID(2),
+				Priority: 1000,
+				State:    arvados.ContainerStateLocked,
+				RuntimeConstraints: arvados.RuntimeConstraints{
+					VCPUs: 2,
+					RAM:   2 << 30,
+				},
+			},
+			// scheduled on an instance (but not Running yet)
+			{
+				UUID:     test.ContainerUUID(3),
+				Priority: 1000,
+				State:    arvados.ContainerStateLocked,
+				RuntimeConstraints: arvados.RuntimeConstraints{
+					VCPUs: 3,
+					RAM:   3 << 30,
+				},
+			},
+			// not yet scheduled
+			{
+				UUID:     test.ContainerUUID(4),
+				Priority: 1000,
+				State:    arvados.ContainerStateLocked,
+				RuntimeConstraints: arvados.RuntimeConstraints{
+					VCPUs: 3,
+					RAM:   3 << 30,
+				},
+			},
+			// not yet locked
+			{
+				UUID:     test.ContainerUUID(5),
+				Priority: 1000,
+				State:    arvados.ContainerStateQueued,
+				RuntimeConstraints: arvados.RuntimeConstraints{
+					VCPUs: 3,
+					RAM:   3 << 30,
+				},
+			},
+		},
+	}
+	queue.Update()
+	pool := stubPool{
+		quota: 16,
+		unalloc: map[arvados.InstanceType]int{
+			test.InstanceType(2): 2,
+			test.InstanceType(3): 2,
+		},
+		idle: map[arvados.InstanceType]int{
+			test.InstanceType(2): 1,
+			test.InstanceType(3): 1,
+		},
+		running: map[string]time.Time{
+			test.ContainerUUID(1): {},
+			test.ContainerUUID(3): {},
+		},
+		creates:   []arvados.InstanceType{},
+		starts:    []string{},
+		canCreate: 0,
+	}
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
+	sch.last503time = time.Now()
+	sch.maxConcurrency = 3
+	sch.sync()
+	sch.runQueue()
+	sch.sync()
+
+	c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
+	c.Check(pool.shutdowns, check.Equals, 0)
+	c.Check(pool.creates, check.HasLen, 0)
+	c.Check(queue.StateChanges(), check.HasLen, 0)
+}
+
+// If we somehow have more supervisor containers in Locked state than
+// we should (e.g., config changed since they started), and some
+// appropriate-sized instances booting up, unlock the excess
+// supervisor containers, but let the instances keep booting.
+func (*SchedulerSuite) TestUnlockExcessSupervisors(c *check.C) {
+	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+	queue := test.Queue{
+		ChooseType: chooseType,
+	}
+	for i := 1; i <= 6; i++ {
+		queue.Containers = append(queue.Containers, arvados.Container{
+			UUID:     test.ContainerUUID(i),
+			Priority: int64(1000 - i),
+			State:    arvados.ContainerStateLocked,
+			RuntimeConstraints: arvados.RuntimeConstraints{
+				VCPUs: 2,
+				RAM:   2 << 30,
+			},
+			SchedulingParameters: arvados.SchedulingParameters{
+				Supervisor: true,
+			},
+		})
+	}
+	queue.Update()
+	pool := stubPool{
+		quota: 16,
+		unalloc: map[arvados.InstanceType]int{
+			test.InstanceType(2): 2,
+		},
+		idle: map[arvados.InstanceType]int{
+			test.InstanceType(2): 1,
+		},
+		running: map[string]time.Time{
+			test.ContainerUUID(1): {},
+			test.ContainerUUID(2): {},
+			test.ContainerUUID(3): {},
+			test.ContainerUUID(4): {},
+		},
+		creates:   []arvados.InstanceType{},
+		starts:    []string{},
+		canCreate: 0,
+	}
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 8, 0.5)
+	sch.sync()
+	sch.runQueue()
+	sch.sync()
+
+	c.Check(pool.starts, check.DeepEquals, []string{})
+	c.Check(pool.shutdowns, check.Equals, 0)
+	c.Check(pool.creates, check.HasLen, 0)
+	c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
+		{UUID: test.ContainerUUID(5), From: "Locked", To: "Queued"},
+		{UUID: test.ContainerUUID(6), From: "Locked", To: "Queued"},
+	})
+}
+
+// Assuming we're not at quota, don't try to shutdown idle nodes
+// merely because we have more queued/locked supervisor containers
+// than MaxSupervisors -- it won't help.
+func (*SchedulerSuite) TestExcessSupervisors(c *check.C) {
+	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+	queue := test.Queue{
+		ChooseType: chooseType,
+	}
+	for i := 1; i <= 8; i++ {
+		queue.Containers = append(queue.Containers, arvados.Container{
+			UUID:     test.ContainerUUID(i),
+			Priority: int64(1000 + i),
+			State:    arvados.ContainerStateQueued,
+			RuntimeConstraints: arvados.RuntimeConstraints{
+				VCPUs: 2,
+				RAM:   2 << 30,
+			},
+			SchedulingParameters: arvados.SchedulingParameters{
+				Supervisor: true,
+			},
+		})
+	}
+	for i := 2; i < 4; i++ {
+		queue.Containers[i].State = arvados.ContainerStateLocked
+	}
+	for i := 4; i < 6; i++ {
+		queue.Containers[i].State = arvados.ContainerStateRunning
+	}
+	queue.Update()
+	pool := stubPool{
+		quota: 16,
+		unalloc: map[arvados.InstanceType]int{
+			test.InstanceType(2): 2,
+		},
+		idle: map[arvados.InstanceType]int{
+			test.InstanceType(2): 1,
+		},
+		running: map[string]time.Time{
+			test.ContainerUUID(5): {},
+			test.ContainerUUID(6): {},
+		},
+		creates:   []arvados.InstanceType{},
+		starts:    []string{},
+		canCreate: 0,
+	}
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 8, 0.5)
+	sch.sync()
+	sch.runQueue()
+	sch.sync()
+
+	c.Check(pool.starts, check.HasLen, 2)
+	c.Check(pool.shutdowns, check.Equals, 0)
+	c.Check(pool.creates, check.HasLen, 0)
+	c.Check(queue.StateChanges(), check.HasLen, 0)
+}
+
+// Don't flap lock/unlock when equal-priority containers compete for
+// limited workers.
+//
+// (Unless we use FirstSeenAt as a secondary sort key, each runQueue()
+// tends to choose a different one of the equal-priority containers as
+// the "first" one that should be locked, and unlock the one it chose
+// last time. This generates logging noise, and fails containers by
+// reaching MaxDispatchAttempts quickly.)
+func (*SchedulerSuite) TestEqualPriorityContainers(c *check.C) {
+	logger := ctxlog.TestLogger(c)
+	ctx := ctxlog.Context(context.Background(), logger)
+	queue := test.Queue{
+		ChooseType: chooseType,
+		Logger:     logger,
+	}
+	for i := 0; i < 8; i++ {
+		queue.Containers = append(queue.Containers, arvados.Container{
+			UUID:     test.ContainerUUID(i),
+			Priority: 333,
+			State:    arvados.ContainerStateQueued,
+			RuntimeConstraints: arvados.RuntimeConstraints{
+				VCPUs: 3,
+				RAM:   3 << 30,
+			},
+		})
+	}
+	queue.Update()
+	pool := stubPool{
+		quota: 2,
+		unalloc: map[arvados.InstanceType]int{
+			test.InstanceType(3): 2,
+		},
+		idle: map[arvados.InstanceType]int{
+			test.InstanceType(3): 2,
+		},
+		running:   map[string]time.Time{},
+		creates:   []arvados.InstanceType{},
+		starts:    []string{},
+		canCreate: 0,
+	}
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
+	for i := 0; i < 30; i++ {
+		sch.runQueue()
+		sch.sync()
+		time.Sleep(time.Millisecond)
+	}
+	c.Check(pool.shutdowns, check.Equals, 0)
+	c.Check(pool.starts, check.HasLen, 2)
+	unlocked := map[string]int{}
+	for _, chg := range queue.StateChanges() {
+		if chg.To == arvados.ContainerStateQueued {
+			unlocked[chg.UUID]++
+		}
+	}
+	for uuid, count := range unlocked {
+		c.Check(count, check.Equals, 1, check.Commentf("%s", uuid))
 	}
 }
 
@@ -248,6 +544,7 @@ func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
 func (*SchedulerSuite) TestStartWhileCreating(c *check.C) {
 	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
 	pool := stubPool{
+		quota: 1000,
 		unalloc: map[arvados.InstanceType]int{
 			test.InstanceType(1): 2,
 			test.InstanceType(2): 2,
@@ -325,7 +622,7 @@ func (*SchedulerSuite) TestStartWhileCreating(c *check.C) {
 		},
 	}
 	queue.Update()
-	New(ctx, &queue, &pool, time.Millisecond, time.Millisecond).runQueue()
+	New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0).runQueue()
 	c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(2), test.InstanceType(1)})
 	c.Check(pool.starts, check.DeepEquals, []string{uuids[6], uuids[5], uuids[3], uuids[2]})
 	running := map[string]bool{}
@@ -342,6 +639,7 @@ func (*SchedulerSuite) TestKillNonexistentContainer(c *check.C) {
 	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
 	pool := stubPool{
+		quota: 1000,
 		unalloc: map[arvados.InstanceType]int{
 			test.InstanceType(2): 0,
 		},
@@ -349,7 +647,7 @@ func (*SchedulerSuite) TestKillNonexistentContainer(c *check.C) {
 			test.InstanceType(2): 0,
 		},
 		running: map[string]time.Time{
-			test.ContainerUUID(2): time.Time{},
+			test.ContainerUUID(2): {},
 		},
 	}
 	queue := test.Queue{
@@ -368,10 +666,159 @@ func (*SchedulerSuite) TestKillNonexistentContainer(c *check.C) {
 		},
 	}
 	queue.Update()
-	sch := New(ctx, &queue, &pool, time.Millisecond, time.Millisecond)
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
 	c.Check(pool.running, check.HasLen, 1)
 	sch.sync()
 	for deadline := time.Now().Add(time.Second); len(pool.Running()) > 0 && time.Now().Before(deadline); time.Sleep(time.Millisecond) {
 	}
 	c.Check(pool.Running(), check.HasLen, 0)
 }
+
+func (*SchedulerSuite) TestContainersMetrics(c *check.C) {
+	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+	queue := test.Queue{
+		ChooseType: chooseType,
+		Containers: []arvados.Container{
+			{
+				UUID:      test.ContainerUUID(1),
+				Priority:  1,
+				State:     arvados.ContainerStateLocked,
+				CreatedAt: time.Now().Add(-10 * time.Second),
+				RuntimeConstraints: arvados.RuntimeConstraints{
+					VCPUs: 1,
+					RAM:   1 << 30,
+				},
+			},
+		},
+	}
+	queue.Update()
+
+	// Create a pool with one unallocated (idle/booting/unknown) worker,
+	// and `idle` and `unknown` not set (empty). Iow this worker is in the booting
+	// state, and the container will be allocated but not started yet.
+	pool := stubPool{
+		unalloc: map[arvados.InstanceType]int{test.InstanceType(1): 1},
+	}
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
+	sch.runQueue()
+	sch.updateMetrics()
+
+	c.Check(int(testutil.ToFloat64(sch.mContainersAllocatedNotStarted)), check.Equals, 1)
+	c.Check(int(testutil.ToFloat64(sch.mContainersNotAllocatedOverQuota)), check.Equals, 0)
+	c.Check(int(testutil.ToFloat64(sch.mLongestWaitTimeSinceQueue)), check.Equals, 10)
+
+	// Create a pool without workers. The queued container will not be started, and the
+	// 'over quota' metric will be 1 because no workers are available and canCreate defaults
+	// to zero.
+	pool = stubPool{}
+	sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
+	sch.runQueue()
+	sch.updateMetrics()
+
+	c.Check(int(testutil.ToFloat64(sch.mContainersAllocatedNotStarted)), check.Equals, 0)
+	c.Check(int(testutil.ToFloat64(sch.mContainersNotAllocatedOverQuota)), check.Equals, 1)
+	c.Check(int(testutil.ToFloat64(sch.mLongestWaitTimeSinceQueue)), check.Equals, 10)
+
+	// Reset the queue, and create a pool with an idle worker. The queued
+	// container will be started immediately and mLongestWaitTimeSinceQueue
+	// should be zero.
+	queue = test.Queue{
+		ChooseType: chooseType,
+		Containers: []arvados.Container{
+			{
+				UUID:      test.ContainerUUID(1),
+				Priority:  1,
+				State:     arvados.ContainerStateLocked,
+				CreatedAt: time.Now().Add(-10 * time.Second),
+				RuntimeConstraints: arvados.RuntimeConstraints{
+					VCPUs: 1,
+					RAM:   1 << 30,
+				},
+			},
+		},
+	}
+	queue.Update()
+
+	pool = stubPool{
+		idle:    map[arvados.InstanceType]int{test.InstanceType(1): 1},
+		unalloc: map[arvados.InstanceType]int{test.InstanceType(1): 1},
+		running: map[string]time.Time{},
+	}
+	sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
+	sch.runQueue()
+	sch.updateMetrics()
+
+	c.Check(int(testutil.ToFloat64(sch.mLongestWaitTimeSinceQueue)), check.Equals, 0)
+}
+
+// Assign priority=4, 3 and 1 containers to idle nodes. Ignore the supervisor at priority 2.
+func (*SchedulerSuite) TestSkipSupervisors(c *check.C) {
+	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+	queue := test.Queue{
+		ChooseType: chooseType,
+		Containers: []arvados.Container{
+			{
+				UUID:     test.ContainerUUID(1),
+				Priority: 1,
+				State:    arvados.ContainerStateLocked,
+				RuntimeConstraints: arvados.RuntimeConstraints{
+					VCPUs: 1,
+					RAM:   1 << 30,
+				},
+			},
+			{
+				UUID:     test.ContainerUUID(2),
+				Priority: 2,
+				State:    arvados.ContainerStateLocked,
+				RuntimeConstraints: arvados.RuntimeConstraints{
+					VCPUs: 1,
+					RAM:   1 << 30,
+				},
+				SchedulingParameters: arvados.SchedulingParameters{
+					Supervisor: true,
+				},
+			},
+			{
+				UUID:     test.ContainerUUID(3),
+				Priority: 3,
+				State:    arvados.ContainerStateLocked,
+				RuntimeConstraints: arvados.RuntimeConstraints{
+					VCPUs: 1,
+					RAM:   1 << 30,
+				},
+				SchedulingParameters: arvados.SchedulingParameters{
+					Supervisor: true,
+				},
+			},
+			{
+				UUID:     test.ContainerUUID(4),
+				Priority: 4,
+				State:    arvados.ContainerStateLocked,
+				RuntimeConstraints: arvados.RuntimeConstraints{
+					VCPUs: 1,
+					RAM:   1 << 30,
+				},
+				SchedulingParameters: arvados.SchedulingParameters{
+					Supervisor: true,
+				},
+			},
+		},
+	}
+	queue.Update()
+	pool := stubPool{
+		quota: 1000,
+		unalloc: map[arvados.InstanceType]int{
+			test.InstanceType(1): 4,
+			test.InstanceType(2): 4,
+		},
+		idle: map[arvados.InstanceType]int{
+			test.InstanceType(1): 4,
+			test.InstanceType(2): 4,
+		},
+		running:   map[string]time.Time{},
+		canCreate: 0,
+	}
+	New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 10, 0.2).runQueue()
+	c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType(nil))
+	c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4), test.ContainerUUID(3), test.ContainerUUID(1)})
+}