}
c.Check(running, check.DeepEquals, map[string]bool{uuids[3]: false, uuids[6]: false})
}
+
+// TestKillNonexistentContainer verifies that sync() kills the
+// crunch-run process for a container UUID that is running on a worker
+// but does not appear in the queue at all.
+func (*SchedulerSuite) TestKillNonexistentContainer(c *check.C) {
+ ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+ // Stub pool: one crunch-run process is already running
+ // test.ContainerUUID(2), which is deliberately absent from the
+ // queue built below.
+ pool := stubPool{
+ unalloc: map[arvados.InstanceType]int{
+ test.InstanceType(2): 0,
+ },
+ idle: map[arvados.InstanceType]int{
+ test.InstanceType(2): 0,
+ },
+ running: map[string]time.Time{
+ test.ContainerUUID(2): time.Time{},
+ },
+ }
+ // The queue knows only ContainerUUID(1), so ContainerUUID(2) is
+ // "nonexistent" from the scheduler's point of view.
+ queue := test.Queue{
+ ChooseType: chooseType,
+ Containers: []arvados.Container{
+ {
+ // create a new worker
+ UUID: test.ContainerUUID(1),
+ Priority: 1,
+ State: arvados.ContainerStateLocked,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 1,
+ RAM: 1 << 30,
+ },
+ },
+ },
+ }
+ queue.Update()
+ sch := New(ctx, &queue, &pool, time.Millisecond, time.Millisecond)
+ c.Check(pool.running, check.HasLen, 1)
+ sch.sync()
+ // kill() runs in a goroutine, so poll the stub pool (with a
+ // one-second deadline) until the process is reported gone.
+ for deadline := time.Now().Add(time.Second); len(pool.Running()) > 0 && time.Now().Before(deadline); time.Sleep(time.Millisecond) {
+ }
+ c.Check(pool.Running(), check.HasLen, 0)
+}
switch ent.Container.State {
case arvados.ContainerStateRunning:
if !running {
- go sch.cancel(ent, "not running on any worker")
+ go sch.cancel(uuid, "not running on any worker")
} else if !exited.IsZero() && qUpdated.After(exited) {
- go sch.cancel(ent, "state=\"Running\" after crunch-run exited")
+ go sch.cancel(uuid, "state=\"Running\" after crunch-run exited")
} else if ent.Container.Priority == 0 {
- go sch.kill(ent, "priority=0")
+ go sch.kill(uuid, "priority=0")
}
case arvados.ContainerStateComplete, arvados.ContainerStateCancelled:
if running {
// of kill() will be to make the
// worker available for the next
// container.
- go sch.kill(ent, fmt.Sprintf("state=%q", ent.Container.State))
+ go sch.kill(uuid, fmt.Sprintf("state=%q", ent.Container.State))
} else {
sch.logger.WithFields(logrus.Fields{
"ContainerUUID": uuid,
// a network outage and is still
// preparing to run a container that
// has already been unlocked/requeued.
- go sch.kill(ent, fmt.Sprintf("state=%q", ent.Container.State))
+ go sch.kill(uuid, fmt.Sprintf("state=%q", ent.Container.State))
}
case arvados.ContainerStateLocked:
if running && !exited.IsZero() && qUpdated.After(exited) {
go sch.requeue(ent, "crunch-run exited")
} else if running && exited.IsZero() && ent.Container.Priority == 0 {
- go sch.kill(ent, "priority=0")
+ go sch.kill(uuid, "priority=0")
} else if !running && ent.Container.Priority == 0 {
go sch.requeue(ent, "priority=0")
}
}).Error("BUG: unexpected state")
}
}
+ // Kill any crunch-run process whose container UUID has no queue
+ // entry at all (e.g. the container record disappeared). This is
+ // possible now that kill() takes a bare UUID instead of a QueueEnt.
+ for uuid := range running {
+ if _, known := qEntries[uuid]; !known {
+ go sch.kill(uuid, "not in queue")
+ }
+ }
}
-func (sch *Scheduler) cancel(ent container.QueueEnt, reason string) {
- uuid := ent.Container.UUID
+// cancel takes the per-UUID lock, then cancels the container
+// identified by uuid; reason is presumably included in the log
+// entry (NOTE(review): rest of body is outside this hunk — confirm).
+func (sch *Scheduler) cancel(uuid string, reason string) {
if !sch.uuidLock(uuid, "cancel") {
return
}
}
}
-func (sch *Scheduler) kill(ent container.QueueEnt, reason string) {
- uuid := ent.Container.UUID
+// kill asks the worker pool to kill the crunch-run process for the
+// container identified by uuid; reason is logged at debug level.
+func (sch *Scheduler) kill(uuid string, reason string) {
logger := sch.logger.WithField("ContainerUUID", uuid)
logger.Debugf("killing crunch-run process because %s", reason)
sch.pool.KillContainer(uuid)