12552: Prioritize containers according to top-level requests.
diff --git a/services/crunch-dispatch-slurm/squeue.go b/services/crunch-dispatch-slurm/squeue.go
index df3c8680af760ecf43e6c516110f75b204d72ade..ee79c6f774c1ca4cb277f1c356ebca792d790f49 100644
--- a/services/crunch-dispatch-slurm/squeue.go
+++ b/services/crunch-dispatch-slurm/squeue.go
@@ -79,11 +79,27 @@ func (sqc *SqueueChecker) reniceAll() {
                        // (perhaps it's not an Arvados job)
                        continue
                }
+               if j.priority == 0 {
+                       // SLURM <= 15.x implements "hold" by setting
+                       // priority to 0. If we include held jobs
+                       // here, we'll end up trying to push other
+                       // jobs below them using negative priority,
+                       // which won't help anything.
+                       continue
+               }
                jobs = append(jobs, j)
        }
 
        sort.Slice(jobs, func(i, j int) bool {
-               return jobs[i].wantPriority > jobs[j].wantPriority
+               if jobs[i].wantPriority != jobs[j].wantPriority {
+                       return jobs[i].wantPriority > jobs[j].wantPriority
+               } else {
+                       // break ties with container uuid --
+                       // otherwise, the ordering would change from
+                       // one interval to the next, and we'd do many
+                       // pointless slurm queue rearrangements.
+                       return jobs[i].uuid > jobs[j].uuid
+               }
        })
        renice := wantNice(jobs, sqc.PrioritySpread)
        for i, job := range jobs {
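
Two details in this hunk keep the renice pass cheap: jobs SLURM is holding at priority 0 are skipped outright, and ties in wantPriority are broken by container UUID so the sorted order comes out identical from one polling interval to the next. The standalone sketch below illustrates that ordering; the slurmJob struct and the niceFor() spread are simplified stand-ins assumed for illustration, not the dispatcher's real types or its wantNice() logic.

    package main

    import (
        "fmt"
        "sort"
    )

    // slurmJob is an illustrative stand-in for the dispatcher's internal
    // job record; only the fields needed for ordering are shown.
    type slurmJob struct {
        uuid         string
        wantPriority int64
    }

    // niceFor assigns larger nice values to jobs further down the sorted
    // list. The real wantNice() also accounts for each job's current
    // priority and nice; this simplified version only shows the spread.
    func niceFor(jobs []*slurmJob, spread int64) []int64 {
        nice := make([]int64, len(jobs))
        for i := range jobs {
            nice[i] = int64(i) * spread
        }
        return nice
    }

    func main() {
        jobs := []*slurmJob{
            {uuid: "zzzzz-dz642-aaaaaaaaaaaaaaa", wantPriority: 500},
            {uuid: "zzzzz-dz642-bbbbbbbbbbbbbbb", wantPriority: 900},
            {uuid: "zzzzz-dz642-ccccccccccccccc", wantPriority: 500},
        }
        sort.Slice(jobs, func(i, j int) bool {
            if jobs[i].wantPriority != jobs[j].wantPriority {
                return jobs[i].wantPriority > jobs[j].wantPriority
            }
            // Same tie-break as the patch: with equal priorities the
            // result depends only on the UUIDs, so repeated polls
            // produce the same ordering and no needless renicing.
            return jobs[i].uuid > jobs[j].uuid
        })
        for i, n := range niceFor(jobs, 10) {
            fmt.Printf("%s wantPriority=%d nice=%d\n", jobs[i].uuid, jobs[i].wantPriority, n)
        }
    }
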
@@ -113,7 +129,7 @@ func (sqc *SqueueChecker) check() {
        sqc.L.Lock()
        defer sqc.L.Unlock()
 
-       cmd := sqc.Slurm.QueueCommand([]string{"--all", "--noheader", "--format=%j %y %Q"})
+       cmd := sqc.Slurm.QueueCommand([]string{"--all", "--noheader", "--format=%j %y %Q %T %r"})
        stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
        cmd.Stdout, cmd.Stderr = stdout, stderr
        if err := cmd.Run(); err != nil {
@@ -127,9 +143,9 @@ func (sqc *SqueueChecker) check() {
                if line == "" {
                        continue
                }
-               var uuid string
+               var uuid, state, reason string
                var n, p int64
-               if _, err := fmt.Sscan(line, &uuid, &n, &p); err != nil {
+               if _, err := fmt.Sscan(line, &uuid, &n, &p, &state, &reason); err != nil {
                        log.Printf("warning: ignoring unparsed line in squeue output: %q", line)
                        continue
                }
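
The widened format string asks squeue for the job name (%j), nice value (%y), priority (%Q), job state (%T), and pending reason (%r); the Sscan call above splits those five columns back out. A minimal sketch of that parse, using a made-up output line:

    package main

    import (
        "fmt"
        "log"
    )

    func main() {
        // Hypothetical line in the new "%j %y %Q %T %r" format:
        // job name, nice, priority, state, reason.
        line := "zzzzz-dz642-xxxxxxxxxxxxxxx 0 0 PENDING BadConstraints"

        var uuid, state, reason string
        var n, p int64
        if _, err := fmt.Sscan(line, &uuid, &n, &p, &state, &reason); err != nil {
            log.Fatalf("unparsed line: %q", line)
        }
        fmt.Println(uuid, n, p, state, reason)
        // fmt.Sscan splits on whitespace, so a multi-word reason only
        // fills reason with its first token; that is enough for matching
        // the single-word "BadConstraints".
    }
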
@@ -140,6 +156,23 @@ func (sqc *SqueueChecker) check() {
                replacing.priority = p
                replacing.nice = n
                newq[uuid] = replacing
+
+               if state == "PENDING" && reason == "BadConstraints" && p == 0 && replacing.wantPriority > 0 {
+                       // When using SLURM 14.x or 15.x, our queued
+                       // jobs land in this state when "scontrol
+                       // reconfigure" invalidates their feature
+                       // constraints by clearing all node features.
+                       // They stay in this state even after the
+                       // features reappear, until we run "scontrol
+                       // release {jobid}".
+                       //
+                       // "scontrol release" is silent and successful
+                       // regardless of whether the features have
+                       // reappeared, so rather than second-guessing
+                       // whether SLURM is ready, we just keep trying
+                       // this until it works.
+                       sqc.Slurm.Release(uuid)
+               }
        }
        sqc.queue = newq
        sqc.Broadcast()
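
The Slurm interface's Release method is not shown in this diff. A rough, exec-based sketch of what such a call might do, assuming jobs are submitted with the container UUID as their SLURM job name so scontrol can address them by jobname= (both assumptions, not taken from this patch):

    package main

    import (
        "fmt"
        "os/exec"
    )

    // releaseJob is a hypothetical stand-in for the Slurm.Release()
    // method used above; the real implementation is not part of this
    // diff. It assumes the job name is the container UUID.
    func releaseJob(uuid string) error {
        cmd := exec.Command("scontrol", "release", "jobname="+uuid)
        out, err := cmd.CombinedOutput()
        if err != nil {
            return fmt.Errorf("scontrol release %s: %v (%q)", uuid, err, out)
        }
        return nil
    }

    func main() {
        if err := releaseJob("zzzzz-dz642-xxxxxxxxxxxxxxx"); err != nil {
            fmt.Println(err)
        }
    }

Since "scontrol release" reports success whether or not the node features have come back, check() simply repeats the call on every pass until the job actually leaves the PENDING/BadConstraints state.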