Merge branch '13517-buffer-leak'
[arvados.git] / services / crunch-dispatch-slurm / squeue.go
index 8862d166a4e75e1908b240302e209dc44a132c2f..742943f197580e186e7fd1f7b8084a1357f3661d 100644 (file)
@@ -91,7 +91,14 @@ func (sqc *SqueueChecker) reniceAll() {
        }
 
        sort.Slice(jobs, func(i, j int) bool {
-               return jobs[i].wantPriority > jobs[j].wantPriority
+               if jobs[i].wantPriority != jobs[j].wantPriority {
+                       return jobs[i].wantPriority > jobs[j].wantPriority
+               }
+               // Break ties with container uuid -- otherwise the
+               // ordering would change from one interval to the
+               // next, and we'd do many pointless slurm queue
+               // rearrangements.
+               return jobs[i].uuid > jobs[j].uuid
        })
        renice := wantNice(jobs, sqc.PrioritySpread)
        for i, job := range jobs {
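
sort.Slice is not a stable sort, so the uuid comparison above is what keeps equal-priority jobs in a fixed order from one polling interval to the next. A minimal standalone sketch of the comparator's behavior (the job struct and UUID values below are hypothetical stand-ins, not the dispatcher's real types):

    package main

    import (
        "fmt"
        "sort"
    )

    // job is a hypothetical stand-in for the dispatcher's internal
    // job record, reduced to the two fields the comparator uses.
    type job struct {
        uuid         string
        wantPriority int64
    }

    func main() {
        jobs := []*job{
            {uuid: "zzzzz-dz642-000000000000001", wantPriority: 5},
            {uuid: "zzzzz-dz642-000000000000002", wantPriority: 5},
            {uuid: "zzzzz-dz642-000000000000003", wantPriority: 9},
        }
        // Same comparator as reniceAll: wantPriority first, container
        // uuid as a deterministic tie-break. Without the tie-break,
        // two equal-priority jobs could swap places on every call.
        sort.Slice(jobs, func(i, j int) bool {
            if jobs[i].wantPriority != jobs[j].wantPriority {
                return jobs[i].wantPriority > jobs[j].wantPriority
            }
            return jobs[i].uuid > jobs[j].uuid
        })
        for _, j := range jobs {
            fmt.Println(j.uuid, j.wantPriority)
        }
        // Output:
        // zzzzz-dz642-000000000000003 9
        // zzzzz-dz642-000000000000002 5
        // zzzzz-dz642-000000000000001 5
    }
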
@@ -121,7 +128,7 @@ func (sqc *SqueueChecker) check() {
        sqc.L.Lock()
        defer sqc.L.Unlock()
 
-       cmd := sqc.Slurm.QueueCommand([]string{"--all", "--noheader", "--format=%j %y %Q"})
+       cmd := sqc.Slurm.QueueCommand([]string{"--all", "--noheader", "--format=%j %y %Q %T %r"})
        stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
        cmd.Stdout, cmd.Stderr = stdout, stderr
        if err := cmd.Run(); err != nil {
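
The new format string asks squeue for two extra per-job fields, state (%T) and reason (%r), alongside the existing name (%j), nice (%y), and priority (%Q). A sketch of how such a command could be built and run with os/exec; queueCommand here is a hypothetical stand-in for the package's Slurm.QueueCommand, whose implementation is outside this diff:

    package main

    import (
        "bytes"
        "fmt"
        "os/exec"
    )

    // queueCommand is a hypothetical stand-in for Slurm.QueueCommand.
    func queueCommand(args []string) *exec.Cmd {
        return exec.Command("squeue", args...)
    }

    func main() {
        // %j job name, %y nice, %Q priority, %T state, %r reason --
        // the last two are the fields added by this commit.
        cmd := queueCommand([]string{"--all", "--noheader", "--format=%j %y %Q %T %r"})
        stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
        cmd.Stdout, cmd.Stderr = stdout, stderr
        if err := cmd.Run(); err != nil {
            fmt.Printf("error running squeue: %s %q\n", err, stderr.String())
            return
        }
        fmt.Print(stdout.String())
    }
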
@@ -135,9 +142,16 @@ func (sqc *SqueueChecker) check() {
                if line == "" {
                        continue
                }
-               var uuid string
+               var uuid, state, reason string
                var n, p int64
-               if _, err := fmt.Sscan(line, &uuid, &n, &p); err != nil {
+               if _, err := fmt.Sscan(line, &uuid, &n, &p, &state); err != nil {
                        log.Printf("warning: ignoring unparsed line in squeue output: %q", line)
                        continue
                }
+               // The %r reason can contain spaces (e.g. "launch
+               // failed requeued held"), so it can't be scanned as a
+               // single whitespace-delimited token -- take everything
+               // after the fourth field instead.
+               if f := strings.SplitN(line, " ", 5); len(f) == 5 {
+                       reason = f[4]
+               }
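
fmt.Sscan reads whitespace-delimited tokens, so scanning the reason as a fifth token would capture only the first word of a multi-word reason like "launch failed requeued held". A self-contained example of the parse above (the uuid and sample line are made up):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // A plausible squeue output line; the reason field itself
        // contains spaces.
        line := "zzzzz-dz642-000000000000001 0 0 PENDING launch failed requeued held"

        var uuid, state, reason string
        var n, p int64
        // Scanning a fifth string here would yield reason == "launch".
        if _, err := fmt.Sscan(line, &uuid, &n, &p, &state); err != nil {
            fmt.Printf("unparsed line: %q\n", line)
            return
        }
        // Take everything after the fourth field as the reason.
        if f := strings.SplitN(line, " ", 5); len(f) == 5 {
            reason = f[4]
        }
        fmt.Printf("uuid=%q nice=%d priority=%d state=%q reason=%q\n", uuid, n, p, state, reason)
        // Output:
        // uuid="zzzzz-dz642-000000000000001" nice=0 priority=0 state="PENDING" reason="launch failed requeued held"
    }
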
@@ -148,6 +162,30 @@ func (sqc *SqueueChecker) check() {
                replacing.priority = p
                replacing.nice = n
                newq[uuid] = replacing
+
+               if state == "PENDING" && ((reason == "BadConstraints" && p == 0) || reason == "launch failed requeued held") && replacing.wantPriority > 0 {
+                       // When using SLURM 14.x or 15.x, our queued
+                       // jobs land in this state when "scontrol
+                       // reconfigure" invalidates their feature
+                       // constraints by clearing all node features.
+                       // They stay in this state even after the
+                       // features reappear, until we run "scontrol
+                       // release {jobid}".
+                       //
+                       // "scontrol release" is silent and successful
+                       // regardless of whether the features have
+                       // reappeared, so rather than second-guessing
+                       // whether SLURM is ready, we just keep trying
+                       // this until it works.
+                       //
+                       // "launch failed requeued held" seems to be
+                       // another manifestation of this problem,
+                       // resolved the same way.
+                       log.Printf("releasing held job %q", uuid)
+                       sqc.Slurm.Release(uuid)
+               } else if p < 1<<20 && replacing.wantPriority > 0 {
+                       log.Printf("warning: job %q has low priority %d, nice %d, state %q, reason %q", uuid, p, n, state, reason)
+               }
        }
        sqc.queue = newq
        sqc.Broadcast()
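
For context, Slurm.Release presumably shells out to scontrol; the scontrol man page says hold/release accept a "jobname=<name>" job specification, and this dispatcher names its slurm jobs after container UUIDs. A hedged sketch of what such an implementation might look like (release and its exact arguments are assumptions, not the package's real code):

    package main

    import (
        "fmt"
        "os/exec"
    )

    // release is a hypothetical sketch of the Slurm.Release side of
    // the interface used above.
    func release(jobName string) error {
        out, err := exec.Command("scontrol", "release", "jobname="+jobName).CombinedOutput()
        if err != nil {
            return fmt.Errorf("scontrol release %s: %s (%q)", jobName, err, out)
        }
        return nil
    }

    func main() {
        // "scontrol release" is silent and successful whether or not
        // the job was actually held, which is why check() can simply
        // retry it on every interval until the job starts moving.
        if err := release("zzzzz-dz642-000000000000001"); err != nil {
            fmt.Println(err)
        }
    }
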