// starve this one by keeping
// idle workers alive on different
// instance types.
- logger.Debug("overquota")
+ logger.Trace("overquota")
overquota = sorted[i:]
break tryrun
} else if logger.Info("creating new instance"); sch.pool.Create(it) {
// avoid getting starved here if
// instances of a specific type always
// fail.
+ logger.Trace("pool declined to create new instance")
continue
}
// a network outage and is still
// preparing to run a container that
// has already been unlocked/requeued.
- go sch.kill(uuid, fmt.Sprintf("state=%s", ent.Container.State))
+ go sch.kill(uuid, fmt.Sprintf("pool says running, but queue says state=%s", ent.Container.State))
} else if ent.Container.Priority == 0 {
sch.logger.WithFields(logrus.Fields{
"ContainerUUID": uuid,
return
}
defer sch.uuidUnlock(uuid)
+ sch.logger.WithFields(logrus.Fields{
+ "ContainerUUID": uuid,
+ "reason": reason,
+ }).Debug("kill")
sch.pool.KillContainer(uuid, reason)
sch.pool.ForgetContainer(uuid)
}