X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/8052381fb4e7aceb52497e8378b596178cf5af7c..7aaf9f22aa646077b4b7fd961d6b731185b88137:/lib/dispatchcloud/container/queue.go

diff --git a/lib/dispatchcloud/container/queue.go b/lib/dispatchcloud/container/queue.go
index 7a41d47c39..938ef915f2 100644
--- a/lib/dispatchcloud/container/queue.go
+++ b/lib/dispatchcloud/container/queue.go
@@ -5,11 +5,12 @@
 package container
 
 import (
+	"errors"
 	"io"
 	"sync"
 	"time"
 
-	"git.curoverse.com/arvados.git/sdk/go/arvados"
+	"git.arvados.org/arvados.git/sdk/go/arvados"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/sirupsen/logrus"
 )
@@ -25,10 +26,12 @@ type APIClient interface {
 
 // A QueueEnt is an entry in the queue, consisting of a container
 // record and the instance type that should be used to run it.
 type QueueEnt struct {
-	// The container to run. Only the UUID, State, Priority, and
-	// RuntimeConstraints fields are populated.
-	Container    arvados.Container
-	InstanceType arvados.InstanceType
+	// The container to run. Only the UUID, State, Priority,
+	// RuntimeConstraints, Mounts, and ContainerImage fields are
+	// populated.
+	Container    arvados.Container    `json:"container"`
+	InstanceType arvados.InstanceType `json:"instance_type"`
+	FirstSeenAt  time.Time            `json:"first_seen_at"`
 }
 
 // String implements fmt.Stringer by returning the queued container's
@@ -52,7 +55,6 @@ func (c *QueueEnt) String() string {
 // cache up to date.
 type Queue struct {
 	logger     logrus.FieldLogger
-	reg        *prometheus.Registry
 	chooseType typeChooser
 	client     APIClient
 
@@ -78,14 +80,17 @@ type Queue struct {
 // Arvados cluster's queue during Update, chooseType will be called to
 // assign an appropriate arvados.InstanceType for the queue entry.
 func NewQueue(logger logrus.FieldLogger, reg *prometheus.Registry, chooseType typeChooser, client APIClient) *Queue {
-	return &Queue{
+	cq := &Queue{
 		logger:      logger,
-		reg:         reg,
 		chooseType:  chooseType,
 		client:      client,
 		current:     map[string]QueueEnt{},
 		subscribers: map[<-chan struct{}]chan struct{}{},
 	}
+	if reg != nil {
+		go cq.runMetrics(reg)
+	}
+	return cq
 }
 
 // Subscribe returns a channel that becomes ready to receive when an
@@ -130,8 +135,8 @@ func (cq *Queue) Forget(uuid string) {
 	cq.mtx.Lock()
 	defer cq.mtx.Unlock()
 	ctr := cq.current[uuid].Container
-	if ctr.State == arvados.ContainerStateComplete || ctr.State == arvados.ContainerStateCancelled {
-		delete(cq.current, uuid)
+	if ctr.State == arvados.ContainerStateComplete || ctr.State == arvados.ContainerStateCancelled || (ctr.State == arvados.ContainerStateQueued && ctr.Priority == 0) {
+		cq.delEnt(uuid, ctr.State)
 	}
 }
 
@@ -141,11 +146,11 @@
 func (cq *Queue) Get(uuid string) (arvados.Container, bool) {
 	cq.mtx.Lock()
 	defer cq.mtx.Unlock()
-	if ctr, ok := cq.current[uuid]; !ok {
+	ctr, ok := cq.current[uuid]
+	if !ok {
 		return arvados.Container{}, false
-	} else {
-		return ctr.Container, true
 	}
+	return ctr.Container, true
 }
 
 // Entries returns all cache entries, keyed by container UUID.
@@ -184,7 +189,9 @@ func (cq *Queue) Update() error {
 	cq.mtx.Lock()
 	defer cq.mtx.Unlock()
 	for uuid, ctr := range next {
-		if _, keep := cq.dontupdate[uuid]; keep {
+		if _, dontupdate := cq.dontupdate[uuid]; dontupdate {
+			// Don't clobber a local update that happened
+			// after we started polling.
 			continue
 		}
 		if cur, ok := cq.current[uuid]; !ok {
@@ -194,13 +201,18 @@
 			cq.current[uuid] = cur
 		}
 	}
-	for uuid := range cq.current {
-		if _, keep := cq.dontupdate[uuid]; keep {
-			continue
-		} else if _, keep = next[uuid]; keep {
+	for uuid, ent := range cq.current {
+		if _, dontupdate := cq.dontupdate[uuid]; dontupdate {
+			// Don't expunge an entry that was
+			// added/updated locally after we started
+			// polling.
 			continue
-		} else {
-			delete(cq.current, uuid)
+		} else if _, stillpresent := next[uuid]; !stillpresent {
+			// Expunge an entry that no longer appears in
+			// the poll response (evidently it's
+			// cancelled, completed, deleted, or taken by
+			// a different dispatcher).
+			cq.delEnt(uuid, ent.Container.State)
 		}
 	}
 	cq.dontupdate = nil
@@ -209,12 +221,38 @@
 	return nil
 }
 
+// Caller must have lock.
+func (cq *Queue) delEnt(uuid string, state arvados.ContainerState) {
+	cq.logger.WithFields(logrus.Fields{
+		"ContainerUUID": uuid,
+		"State":         state,
+	}).Info("dropping container from queue")
+	delete(cq.current, uuid)
+}
+
+// Caller must have lock.
 func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
 	it, err := cq.chooseType(&ctr)
 	if err != nil && (ctr.State == arvados.ContainerStateQueued || ctr.State == arvados.ContainerStateLocked) {
+		// We assume here that any chooseType error is a hard
+		// error: it wouldn't help to try again, or to leave
+		// it for a different dispatcher process to attempt.
 		errorString := err.Error()
-		cq.logger.WithField("ContainerUUID", ctr.UUID).Warn("cancel container with no suitable instance type")
+		logger := cq.logger.WithField("ContainerUUID", ctr.UUID)
+		logger.WithError(err).Warn("cancel container with no suitable instance type")
 		go func() {
+			if ctr.State == arvados.ContainerStateQueued {
+				// Can't set runtime error without
+				// locking first.
+				err := cq.Lock(ctr.UUID)
+				if err != nil {
+					logger.WithError(err).Warn("lock failed")
+					return
+					// ...and try again on the
+					// next Update, if the problem
+					// still exists.
+				}
+			}
 			var err error
 			defer func() {
 				if err == nil {
@@ -229,14 +267,8 @@ func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
 			if latest.State == arvados.ContainerStateCancelled {
 				return
 			}
-			cq.logger.WithField("ContainerUUID", ctr.UUID).WithError(err).Warn("error while trying to cancel unsatisfiable container")
+			logger.WithError(err).Warn("error while trying to cancel unsatisfiable container")
 		}()
-		if ctr.State == arvados.ContainerStateQueued {
-			err = cq.Lock(ctr.UUID)
-			if err != nil {
-				return
-			}
-		}
 		err = cq.setRuntimeError(ctr.UUID, errorString)
 		if err != nil {
 			return
@@ -248,7 +280,13 @@ func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
 		}()
 		return
 	}
-	cq.current[uuid] = QueueEnt{Container: ctr, InstanceType: it}
+	cq.logger.WithFields(logrus.Fields{
+		"ContainerUUID": ctr.UUID,
+		"State":         ctr.State,
+		"Priority":      ctr.Priority,
+		"InstanceType":  it.Name,
+	}).Info("adding container to queue")
+	cq.current[uuid] = QueueEnt{Container: ctr, InstanceType: it, FirstSeenAt: time.Now()}
 }
 
 // Lock acquires the dispatch lock for the given container.
@@ -275,15 +313,14 @@ func (cq *Queue) setRuntimeError(uuid, errorString string) error {
 
 // Cancel cancels the given container.
 func (cq *Queue) Cancel(uuid string) error {
-	err := cq.client.RequestAndDecode(nil, "PUT", "arvados/v1/containers/"+uuid, nil, map[string]map[string]interface{}{
+	var resp arvados.Container
+	err := cq.client.RequestAndDecode(&resp, "PUT", "arvados/v1/containers/"+uuid, nil, map[string]map[string]interface{}{
 		"container": {"state": arvados.ContainerStateCancelled},
 	})
 	if err != nil {
 		return err
 	}
-	cq.mtx.Lock()
-	defer cq.mtx.Unlock()
-	cq.notify()
+	cq.updateWithResp(uuid, resp)
 	return nil
 }
 
@@ -293,20 +330,32 @@ func (cq *Queue) apiUpdate(uuid, action string) error {
 	if err != nil {
 		return err
 	}
+	cq.updateWithResp(uuid, resp)
+	return nil
+}
 
+// Update the local queue with the response received from a
+// state-changing API request (lock/unlock/cancel).
+func (cq *Queue) updateWithResp(uuid string, resp arvados.Container) {
 	cq.mtx.Lock()
 	defer cq.mtx.Unlock()
 	if cq.dontupdate != nil {
 		cq.dontupdate[uuid] = struct{}{}
 	}
-	if ent, ok := cq.current[uuid]; !ok {
-		cq.addEnt(uuid, resp)
-	} else {
-		ent.Container.State, ent.Container.Priority, ent.Container.LockedByUUID = resp.State, resp.Priority, resp.LockedByUUID
-		cq.current[uuid] = ent
+	ent, ok := cq.current[uuid]
+	if !ok {
+		// Container is not in queue (e.g., it was not added
+		// because there is no suitable instance type, and
+		// we're just locking/updating it in order to set an
+		// error message). No need to add it, and we don't
+		// necessarily have enough information to add it here
+		// anyway because lock/unlock responses don't include
+		// runtime_constraints.
+		return
 	}
+	ent.Container.State, ent.Container.Priority, ent.Container.LockedByUUID = resp.State, resp.Priority, resp.LockedByUUID
+	cq.current[uuid] = ent
 	cq.notify()
-	return nil
 }
 
 func (cq *Queue) poll() (map[string]*arvados.Container, error) {
@@ -335,7 +384,7 @@ func (cq *Queue) poll() (map[string]*arvados.Container, error) {
 			*next[upd.UUID] = upd
 		}
 	}
-	selectParam := []string{"uuid", "state", "priority", "runtime_constraints"}
+	selectParam := []string{"uuid", "state", "priority", "runtime_constraints", "container_image", "mounts", "scheduling_parameters", "created_at"}
 	limitParam := 1000
 
 	mine, err := cq.fetchAll(arvados.ResourceListParams{
@@ -362,32 +411,62 @@
 	}
 	apply(avail)
 
-	var missing []string
+	missing := map[string]bool{}
 	cq.mtx.Lock()
 	for uuid, ent := range cq.current {
 		if next[uuid] == nil &&
			ent.Container.State != arvados.ContainerStateCancelled &&
			ent.Container.State != arvados.ContainerStateComplete {
-			missing = append(missing, uuid)
+			missing[uuid] = true
 		}
 	}
 	cq.mtx.Unlock()
 
-	for i, page := 0, 20; i < len(missing); i += page {
-		batch := missing[i:]
-		if len(batch) > page {
-			batch = batch[:page]
+	for len(missing) > 0 {
+		var batch []string
+		for uuid := range missing {
+			batch = append(batch, uuid)
+			if len(batch) == 20 {
+				break
+			}
 		}
+		filters := []arvados.Filter{{"uuid", "in", batch}}
 		ended, err := cq.fetchAll(arvados.ResourceListParams{
 			Select:  selectParam,
 			Order:   "uuid",
 			Count:   "none",
-			Filters: []arvados.Filter{{"uuid", "in", batch}},
+			Filters: filters,
 		})
 		if err != nil {
 			return nil, err
 		}
 		apply(ended)
+		if len(ended) == 0 {
+			// This is the only case where we can conclude
+			// a container has been deleted from the
+			// database. A short (but non-zero) page, on
+			// the other hand, can be caused by a response
+			// size limit.
+			for _, uuid := range batch {
+				cq.logger.WithField("ContainerUUID", uuid).Warn("container not found by controller (deleted?)")
+				delete(missing, uuid)
+				cq.mtx.Lock()
+				cq.delEnt(uuid, cq.current[uuid].Container.State)
+				cq.mtx.Unlock()
+			}
+			continue
+		}
+		for _, ctr := range ended {
+			if _, ok := missing[ctr.UUID]; !ok {
+				msg := "BUG? server response did not match requested filters, erroring out rather than risk deadlock"
+				cq.logger.WithFields(logrus.Fields{
+					"ContainerUUID": ctr.UUID,
+					"Filters":       filters,
+				}).Error(msg)
+				return nil, errors.New(msg)
+			}
+			delete(missing, ctr.UUID)
+		}
 	}
 	return next, nil
 }
@@ -420,3 +499,34 @@ func (cq *Queue) fetchAll(initialParams arvados.ResourceListParams) ([]arvados.C
 	}
 	return results, nil
 }
+
+func (cq *Queue) runMetrics(reg *prometheus.Registry) {
+	mEntries := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: "arvados",
+		Subsystem: "dispatchcloud",
+		Name:      "queue_entries",
+		Help:      "Number of active container entries in the controller database.",
+	}, []string{"state", "instance_type"})
+	reg.MustRegister(mEntries)
+
+	type entKey struct {
+		state arvados.ContainerState
+		inst  string
+	}
+	count := map[entKey]int{}
+
+	ch := cq.Subscribe()
+	defer cq.Unsubscribe(ch)
+	for range ch {
+		for k := range count {
+			count[k] = 0
+		}
+		ents, _ := cq.Entries()
+		for _, ent := range ents {
+			count[entKey{ent.Container.State, ent.InstanceType.Name}]++
+		}
+		for k, v := range count {
+			mEntries.WithLabelValues(string(k.state), k.inst).Set(float64(v))
+		}
+	}
+}
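
Usage sketch (not part of the diff above): with this change, passing a non-nil *prometheus.Registry to NewQueue starts the runMetrics goroutine, which registers an arvados_dispatchcloud_queue_entries gauge labeled by state and instance_type and refreshes it each time the queue notifies its subscribers (e.g., after every successful Update or local state change). The snippet below is a minimal illustration of how a caller inside the arvados module might wire that up; the fixed-type chooseType function, the listen address, and the HTTP handler setup are illustrative assumptions, not taken from the real dispatcher code.

package main

import (
	"net/http"

	"git.arvados.org/arvados.git/lib/dispatchcloud/container"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.StandardLogger()
	reg := prometheus.NewRegistry()

	// Hypothetical type chooser for illustration: always return the
	// same instance type instead of consulting cluster configuration.
	chooseType := func(ctr *arvados.Container) (arvados.InstanceType, error) {
		return arvados.InstanceType{Name: "standard"}, nil
	}

	// Because reg is non-nil, NewQueue starts runMetrics(reg) in the
	// background and the queue_entries gauge is registered on reg.
	cq := container.NewQueue(logger, reg, chooseType, arvados.NewClientFromEnv())

	// Each Update() polls the controller and notifies subscribers,
	// which in turn refreshes the queue_entries gauge.
	if err := cq.Update(); err != nil {
		logger.WithError(err).Fatal("queue poll failed")
	}

	// Expose the registry; the address is an arbitrary example.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	logger.Fatal(http.ListenAndServe(":9006", nil))
}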