"sync"
"time"
- "git.curoverse.com/arvados.git/lib/dispatchcloud/test"
- "git.curoverse.com/arvados.git/lib/dispatchcloud/worker"
- "git.curoverse.com/arvados.git/sdk/go/arvados"
- "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/lib/dispatchcloud/test"
+ "git.arvados.org/arvados.git/lib/dispatchcloud/worker"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
check "gopkg.in/check.v1"
)
notify <-chan struct{}
unalloc map[arvados.InstanceType]int // idle+booting+unknown
idle map[arvados.InstanceType]int
+ unknown map[arvados.InstanceType]int
running map[string]time.Time
- atQuota bool
+ quota int
canCreate int
creates []arvados.InstanceType
starts []string
sync.Mutex
}
-func (p *stubPool) AtQuota() bool { return p.atQuota }
+// AtQuota reports whether the stub pool is at or over its configured
+// instance quota. Note len() here counts map keys — distinct unalloc
+// instance types plus running/unknown entries — which is adequate for
+// these tests' small quota values; presumably intentional for the stub,
+// not a true instance count — TODO confirm against scheduler usage.
+func (p *stubPool) AtQuota() bool {
+ p.Lock()
+ defer p.Unlock()
+ return len(p.unalloc)+len(p.running)+len(p.unknown) >= p.quota
+}
func (p *stubPool) Subscribe() <-chan struct{} { return p.notify }
func (p *stubPool) Unsubscribe(<-chan struct{}) {}
func (p *stubPool) Running() map[string]time.Time {
defer p.Unlock()
r := map[arvados.InstanceType]int{}
for it, n := range p.unalloc {
- r[it] = n
+ r[it] = n - p.unknown[it]
}
return r
}
func (p *stubPool) KillContainer(uuid, reason string) bool {
p.Lock()
defer p.Unlock()
- delete(p.running, uuid)
- return true
+ // Remove the entry on the way out, but first capture it so the
+ // return value can distinguish cases: true only if the container
+ // was tracked AND its start time is still the zero time.Time
+ // (i.e. it never actually started running).
+ defer delete(p.running, uuid)
+ t, ok := p.running[uuid]
+ return ok && t.IsZero()
}
func (p *stubPool) Shutdown(arvados.InstanceType) bool {
p.shutdowns++
worker.StateBooting: len(p.unalloc) - len(p.idle),
worker.StateIdle: len(p.idle),
worker.StateRunning: len(p.running),
+ worker.StateUnknown: len(p.unknown),
}
}
func (p *stubPool) StartContainer(it arvados.InstanceType, ctr arvados.Container) bool {
type SchedulerSuite struct{}
-// Assign priority=4 container to idle node. Create a new instance for
-// the priority=3 container. Don't try to start any priority<3
-// containers because priority=3 container didn't start
-// immediately. Don't try to create any other nodes after the failed
-// create.
+// Assign priority=4 container to idle node. Create new instances for
+// the priority=3, 2, 1 containers.
func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
queue := test.Queue{
}
queue.Update()
pool := stubPool{
+ quota: 1000,
unalloc: map[arvados.InstanceType]int{
test.InstanceType(1): 1,
test.InstanceType(2): 2,
canCreate: 0,
}
New(ctx, &queue, &pool, time.Millisecond, time.Millisecond).runQueue()
- c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(1)})
+ c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(1), test.InstanceType(1), test.InstanceType(1)})
c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})
c.Check(pool.running, check.HasLen, 1)
for uuid := range pool.running {
}
}
-// If Create() fails, shutdown some nodes, and don't call Create()
-// again. Don't call Create() at all if AtQuota() is true.
+// If pool.AtQuota() is true, shutdown some unalloc nodes, and don't
+// call Create().
func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
- for quota := 0; quota < 2; quota++ {
+ for quota := 1; quota < 3; quota++ {
c.Logf("quota=%d", quota)
shouldCreate := []arvados.InstanceType{}
- for i := 0; i < quota; i++ {
+ for i := 1; i < quota; i++ {
shouldCreate = append(shouldCreate, test.InstanceType(3))
}
queue := test.Queue{
}
queue.Update()
pool := stubPool{
- atQuota: quota == 0,
+ quota: quota,
unalloc: map[arvados.InstanceType]int{
test.InstanceType(2): 2,
},
}
New(ctx, &queue, &pool, time.Millisecond, time.Millisecond).runQueue()
c.Check(pool.creates, check.DeepEquals, shouldCreate)
- c.Check(pool.starts, check.DeepEquals, []string{})
- c.Check(pool.shutdowns, check.Not(check.Equals), 0)
+ if len(shouldCreate) == 0 {
+ c.Check(pool.starts, check.DeepEquals, []string{})
+ c.Check(pool.shutdowns, check.Not(check.Equals), 0)
+ } else {
+ c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
+ c.Check(pool.shutdowns, check.Equals, 0)
+ }
}
}
func (*SchedulerSuite) TestStartWhileCreating(c *check.C) {
ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
pool := stubPool{
+ quota: 1000,
unalloc: map[arvados.InstanceType]int{
test.InstanceType(1): 2,
test.InstanceType(2): 2,
func (*SchedulerSuite) TestKillNonexistentContainer(c *check.C) {
ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
pool := stubPool{
+ quota: 1000,
unalloc: map[arvados.InstanceType]int{
test.InstanceType(2): 0,
},
test.InstanceType(2): 0,
},
running: map[string]time.Time{
- test.ContainerUUID(2): time.Time{},
+ test.ContainerUUID(2): {},
},
}
queue := test.Queue{