+// TestResumeAfterRestart verifies that a Pool recovers its state from cloud
+// instance tags after a restart: instances created by the first Pool (and the
+// IdleBehaviorHold set on one of them) must be rediscovered, with the same
+// idle behavior, by a second Pool created over the same stub InstanceSet.
+func (suite *PoolSuite) TestResumeAfterRestart(c *check.C) {
+ type1 := test.InstanceType(1)
+ type2 := test.InstanceType(2)
+ type3 := test.InstanceType(3)
+ // waitForIdle blocks until the pool reports exactly three instances —
+ // two of type1 and one of type2, all in StateIdle — or fails the test
+ // after a one-second timeout. It re-checks on every pool notification.
+ waitForIdle := func(pool *Pool, notify <-chan struct{}) {
+ timeout := time.NewTimer(time.Second)
+ for {
+ instances := pool.Instances()
+ // Sort by instance type name so the positional checks below are
+ // deterministic. (NOTE(review): strings.Compare(a, b) < 0 is just
+ // the long form of a < b.)
+ sort.Slice(instances, func(i, j int) bool {
+ return strings.Compare(instances[i].ArvadosInstanceType, instances[j].ArvadosInstanceType) < 0
+ })
+ if len(instances) == 3 &&
+ instances[0].ArvadosInstanceType == type1.Name &&
+ instances[0].WorkerState == StateIdle.String() &&
+ instances[1].ArvadosInstanceType == type1.Name &&
+ instances[1].WorkerState == StateIdle.String() &&
+ instances[2].ArvadosInstanceType == type2.Name &&
+ instances[2].WorkerState == StateIdle.String() {
+ return
+ }
+ select {
+ case <-timeout.C:
+ c.Logf("pool.Instances() == %#v", instances)
+ c.Error("timed out")
+ return
+ case <-notify:
+ }
+ }
+ }
+
+ logger := ctxlog.TestLogger(c)
+ driver := &test.StubDriver{}
+ instanceSetID := cloud.InstanceSetID("test-instance-set-id")
+ // Both pools below share this stub InstanceSet, which is what lets the
+ // second pool "see" the instances (and tags) created by the first.
+ is, err := driver.InstanceSet(nil, instanceSetID, nil, logger)
+ c.Assert(err, check.IsNil)
+
+ // Stub SSH executor: empty (success) responses for the crunch-run
+ // version/list probe and for the "true" boot probe configured below.
+ newExecutor := func(cloud.Instance) Executor {
+ return &stubExecutor{
+ response: map[string]stubResp{
+ "crunch-run-custom --list": {},
+ "true": {},
+ },
+ }
+ }
+
+ cluster := &arvados.Cluster{
+ Containers: arvados.ContainersConfig{
+ CloudVMs: arvados.CloudVMsConfig{
+ BootProbeCommand: "true",
+ MaxProbesPerSecond: 1000,
+ // Short intervals keep the test fast; probes/syncs fire
+ // every 10ms.
+ ProbeInterval: arvados.Duration(time.Millisecond * 10),
+ SyncInterval: arvados.Duration(time.Millisecond * 10),
+ TagKeyPrefix: "testprefix:",
+ },
+ CrunchRunCommand: "crunch-run-custom",
+ },
+ InstanceTypes: arvados.InstanceTypeMap{
+ type1.Name: type1,
+ type2.Name: type2,
+ type3.Name: type3,
+ },
+ }
+
+ // First pool: create 2×type1 + 1×type2 and wait for them to boot to Idle.
+ pool := NewPool(logger, arvados.NewClientFromEnv(), prometheus.NewRegistry(), instanceSetID, is, newExecutor, nil, cluster)
+ notify := pool.Subscribe()
+ defer pool.Unsubscribe(notify)
+ pool.Create(type1)
+ pool.Create(type1)
+ pool.Create(type2)
+ waitForIdle(pool, notify)
+ // Put the type2 instance on hold; this should be persisted as an
+ // instance tag and survive the pool restart below.
+ var heldInstanceID cloud.InstanceID
+ for _, inst := range pool.Instances() {
+ if inst.ArvadosInstanceType == type2.Name {
+ heldInstanceID = cloud.InstanceID(inst.Instance)
+ pool.SetIdleBehavior(heldInstanceID, IdleBehaviorHold)
+ }
+ }
+ // Wait for the tags to save to the cloud provider. SetIdleBehavior
+ // updates tags asynchronously, so poll the worker's instance tags
+ // directly (under pool.mtx) until the hold tag appears, up to 1s.
+ tagKey := cluster.Containers.CloudVMs.TagKeyPrefix + tagKeyIdleBehavior
+ deadline := time.Now().Add(time.Second)
+ for !func() bool {
+ pool.mtx.RLock()
+ defer pool.mtx.RUnlock()
+ for _, wkr := range pool.workers {
+ if wkr.instType == type2 {
+ return wkr.instance.Tags()[tagKey] == string(IdleBehaviorHold)
+ }
+ }
+ return false
+ }() {
+ if time.Now().After(deadline) {
+ c.Fatal("timeout")
+ }
+ time.Sleep(time.Millisecond * 10)
+ }
+ pool.Stop()
+
+ c.Log("------- starting new pool, waiting to recover state")
+
+ // Second pool over the same InstanceSet simulates a restarted
+ // dispatcher; it must rediscover all three instances via sync.
+ pool2 := NewPool(logger, arvados.NewClientFromEnv(), prometheus.NewRegistry(), instanceSetID, is, newExecutor, nil, cluster)
+ notify2 := pool2.Subscribe()
+ defer pool2.Unsubscribe(notify2)
+ waitForIdle(pool2, notify2)
+ // The held type2 instance must keep its identity and Hold behavior
+ // (recovered from the tag); everything else defaults to Run.
+ for _, inst := range pool2.Instances() {
+ if inst.ArvadosInstanceType == type2.Name {
+ c.Check(inst.Instance, check.Equals, heldInstanceID)
+ c.Check(inst.IdleBehavior, check.Equals, IdleBehaviorHold)
+ } else {
+ c.Check(inst.IdleBehavior, check.Equals, IdleBehaviorRun)
+ }
+ }
+ pool2.Stop()