if umnterr != nil {
runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
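+ // arv-mount --unmount failed; kill the arv-mount process directly.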
+ runner.ArvMount.Process.Kill()
} else {
// If arv-mount --unmount gets stuck for any reason, we
// don't want to wait for it forever. Do Wait() in a
// goroutine so it can't block cleanup indefinitely.
Container arvados.Container `json:"container"`
InstanceType arvados.InstanceType `json:"instance_type"`
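+ // FirstSeenAt is when this entry was first added to the queue;
+ // it is used as a stable tie-breaker when sorting containers
+ // with equal priority.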
+ FirstSeenAt time.Time `json:"first_seen_at"`
}
// String implements fmt.Stringer by returning the queued container's
// UUID.
delete(cq.current, uuid)
}
+// Caller must have lock.
func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
it, err := cq.chooseType(&ctr)
if err != nil && (ctr.State == arvados.ContainerStateQueued || ctr.State == arvados.ContainerStateLocked) {
"Priority": ctr.Priority,
"InstanceType": it.Name,
}).Info("adding container to queue")
- cq.current[uuid] = QueueEnt{Container: ctr, InstanceType: it}
+ cq.current[uuid] = QueueEnt{Container: ctr, InstanceType: it, FirstSeenAt: time.Now()}
}
// Lock acquires the dispatch lock for the given container.
sorted = append(sorted, ent)
}
sort.Slice(sorted, func(i, j int) bool {
- return sorted[i].Container.Priority > sorted[j].Container.Priority
+ if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
+ return pi > pj
+ } else {
+ // When containers have identical priority,
+ // start them in the order we first noticed
+ // them. This avoids extra lock/unlock cycles
+ // when we unlock the containers that don't
+ // fit in the available pool.
+ return sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)
+ }
})
running := sch.pool.Running()
// starve this one by keeping
// idle workers alive on different
// instance types.
- logger.Debug("unlocking: AtQuota and no unalloc workers")
- sch.queue.Unlock(ctr.UUID)
+ logger.Trace("overquota")
overquota = sorted[i:]
break tryrun
} else if logger.Info("creating new instance"); sch.pool.Create(it) {
// avoid getting starved here if
// instances of a specific type always
// fail.
+ logger.Trace("pool declined to create new instance")
continue
}
starts: []string{},
canCreate: 0,
}
- New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond).runQueue()
+ sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+ sch.runQueue()
+ sch.sync()
+ sch.runQueue()
+ sch.sync()
c.Check(pool.creates, check.DeepEquals, shouldCreate)
if len(shouldCreate) == 0 {
c.Check(pool.starts, check.DeepEquals, []string{})
- c.Check(pool.shutdowns, check.Not(check.Equals), 0)
} else {
c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
- c.Check(pool.shutdowns, check.Equals, 0)
}
+ c.Check(pool.shutdowns, check.Equals, 3-quota)
+ c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
+ {UUID: "zzzzz-dz642-000000000000003", From: "Locked", To: "Queued"},
+ {UUID: "zzzzz-dz642-000000000000002", From: "Locked", To: "Queued"},
+ })
+ }
+}
+
+// Don't flap lock/unlock when equal-priority containers compete for
+// limited workers.
+//
+// (Unless we use FirstSeenAt as a secondary sort key, each runQueue()
+// tends to choose a different one of the equal-priority containers as
+// the "first" one that should be locked, and unlock the one it chose
+// last time. This generates logging noise, and causes containers to
+// fail by reaching MaxDispatchAttempts quickly.)
+func (*SchedulerSuite) TestEqualPriorityContainers(c *check.C) {
+ logger := ctxlog.TestLogger(c)
+ ctx := ctxlog.Context(context.Background(), logger)
+ queue := test.Queue{
+ ChooseType: chooseType,
+ Logger: logger,
+ }
+ for i := 0; i < 8; i++ {
+ queue.Containers = append(queue.Containers, arvados.Container{
+ UUID: test.ContainerUUID(i),
+ Priority: 333,
+ State: arvados.ContainerStateQueued,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 3,
+ RAM: 3 << 30,
+ },
+ })
+ }
+ queue.Update()
+ pool := stubPool{
+ quota: 2,
+ unalloc: map[arvados.InstanceType]int{
+ test.InstanceType(3): 1,
+ },
+ idle: map[arvados.InstanceType]int{
+ test.InstanceType(3): 1,
+ },
+ running: map[string]time.Time{},
+ creates: []arvados.InstanceType{},
+ starts: []string{},
+ canCreate: 1,
+ }
+ sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
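+ // Drive the scheduler through many runQueue/sync cycles;
+ // with FirstSeenAt as a tie-breaker, the schedule should
+ // stay stable instead of flapping lock/unlock.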
+ for i := 0; i < 30; i++ {
+ sch.runQueue()
+ sch.sync()
+ time.Sleep(time.Millisecond)
+ }
+ c.Check(pool.shutdowns, check.Equals, 0)
+ c.Check(pool.starts, check.HasLen, 1)
+ unlocked := map[string]int{}
+ for _, chg := range queue.StateChanges() {
+ if chg.To == arvados.ContainerStateQueued {
+ unlocked[chg.UUID]++
+ }
+ }
+ for uuid, count := range unlocked {
+ c.Check(count, check.Equals, 1, check.Commentf("%s", uuid))
}
}
"github.com/sirupsen/logrus"
)
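+// reportedUnexpectedState ensures the "BUG: unexpected state"
+// message below is logged at most once per process.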
+var reportedUnexpectedState = false
+
// sync resolves discrepancies between the queue and the pool:
//
// Lingering crunch-run processes for finalized and unlocked/requeued
// containers are killed.
// a network outage and is still
// preparing to run a container that
// has already been unlocked/requeued.
- go sch.kill(uuid, fmt.Sprintf("state=%s", ent.Container.State))
+ go sch.kill(uuid, fmt.Sprintf("pool says running, but queue says state=%s", ent.Container.State))
} else if ent.Container.Priority == 0 {
sch.logger.WithFields(logrus.Fields{
"ContainerUUID": uuid,
go sch.requeue(ent, "priority=0")
}
default:
- sch.logger.WithFields(logrus.Fields{
- "ContainerUUID": uuid,
- "State": ent.Container.State,
- }).Error("BUG: unexpected state")
+ if !reportedUnexpectedState {
+ sch.logger.WithFields(logrus.Fields{
+ "ContainerUUID": uuid,
+ "State": ent.Container.State,
+ }).Error("BUG: unexpected state")
+ reportedUnexpectedState = true
+ }
}
}
for uuid := range running {
return
}
defer sch.uuidUnlock(uuid)
+ sch.logger.WithFields(logrus.Fields{
+ "ContainerUUID": uuid,
+ "reason": reason,
+ }).Debug("kill")
sch.pool.KillContainer(uuid, reason)
sch.pool.ForgetContainer(uuid)
}
Logger logrus.FieldLogger
- entries map[string]container.QueueEnt
- updTime time.Time
- subscribers map[<-chan struct{}]chan struct{}
+ entries map[string]container.QueueEnt
+ updTime time.Time
+ subscribers map[<-chan struct{}]chan struct{}
+ stateChanges []QueueStateChange
mtx sync.Mutex
}
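+// QueueStateChange records one container state transition
+// requested via the test Queue's Lock, Unlock, or Cancel
+// methods.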
+type QueueStateChange struct {
+ UUID string
+ From arvados.ContainerState
+ To arvados.ContainerState
+}
+
+// StateChanges returns the state changes requested via all
+// calls to Lock/Unlock/Cancel to date.
+func (q *Queue) StateChanges() []QueueStateChange {
+ q.mtx.Lock()
+ defer q.mtx.Unlock()
+ return q.stateChanges
+}
+
// Entries returns the containers that were queued when Update was
// last called.
func (q *Queue) Entries() (map[string]container.QueueEnt, time.Time) {
// Caller must have lock.
func (q *Queue) changeState(uuid string, from, to arvados.ContainerState) error {
ent := q.entries[uuid]
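+ // Record the requested transition before validating it, so
+ // invalid attempts also show up in StateChanges.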
+ q.stateChanges = append(q.stateChanges, QueueStateChange{uuid, from, to})
if ent.Container.State != from {
return fmt.Errorf("changeState failed: state=%q", ent.Container.State)
}
upd[ctr.UUID] = container.QueueEnt{
Container: ctr,
InstanceType: it,
+ FirstSeenAt: time.Now(),
}
}
}
if attempted:
# Report buffered stderr from previous call to fusermount,
# now that we know it didn't succeed.
- sys.stderr.write(fusermount_output)
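+ # fusermount_output is bytes; write it to stderr's binary buffer.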
+ sys.stderr.buffer.write(fusermount_output)
delay = 1
if deadline:
WORKBENCH2_BRANCH=main
fi
+# On release branches, set this to the docker image tag for the release version.
+DEFAULT_TAG=
+
PG_DATA="$ARVBOX_DATA/postgres"
VAR_DATA="$ARVBOX_DATA/var"
PASSENGER="$ARVBOX_DATA/passenger"
fi
fi
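+ # Fall back to the release default if the user didn't specify a tag.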
+ if test -z "$TAG" -a -n "$DEFAULT_TAG"; then
+ TAG=":$DEFAULT_TAG"
+ fi
+
if [[ "$CONFIG" =~ ^public ]] ; then
if test -n "$ARVBOX_PUBLISH_IP" ; then
localip=$ARVBOX_PUBLISH_IP