X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/9c983060b6f0fc6dc42310587b9858b7f8b968de..355173ba2e8c42b29011493d1d8c7cc4d69295c6:/services/crunch-dispatch-slurm/squeue.go

diff --git a/services/crunch-dispatch-slurm/squeue.go b/services/crunch-dispatch-slurm/squeue.go
index b8e3108c7c..742943f197 100644
--- a/services/crunch-dispatch-slurm/squeue.go
+++ b/services/crunch-dispatch-slurm/squeue.go
@@ -79,18 +79,33 @@ func (sqc *SqueueChecker) reniceAll() {
 			// (perhaps it's not an Arvados job)
 			continue
 		}
+		if j.priority == 0 {
+			// SLURM <= 15.x implements "hold" by setting
+			// priority to 0. If we include held jobs
+			// here, we'll end up trying to push other
+			// jobs below them using negative priority,
+			// which won't help anything.
+			continue
+		}
 		jobs = append(jobs, j)
 	}
 
 	sort.Slice(jobs, func(i, j int) bool {
-		return jobs[i].wantPriority > jobs[j].wantPriority
+		if jobs[i].wantPriority != jobs[j].wantPriority {
+			return jobs[i].wantPriority > jobs[j].wantPriority
+		} else {
+			// break ties with container uuid --
+			// otherwise, the ordering would change from
+			// one interval to the next, and we'd do many
+			// pointless slurm queue rearrangements.
+			return jobs[i].uuid > jobs[j].uuid
+		}
 	})
 	renice := wantNice(jobs, sqc.PrioritySpread)
 	for i, job := range jobs {
 		if renice[i] == job.nice {
 			continue
 		}
-		log.Printf("updating slurm priority for %q: nice %d => %d", job.uuid, job.nice, renice[i])
 		sqc.Slurm.Renice(job.uuid, renice[i])
 	}
 }
@@ -114,7 +129,7 @@ func (sqc *SqueueChecker) check() {
 	sqc.L.Lock()
 	defer sqc.L.Unlock()
 
-	cmd := sqc.Slurm.QueueCommand([]string{"--all", "--format=%j %y %Q"})
+	cmd := sqc.Slurm.QueueCommand([]string{"--all", "--noheader", "--format=%j %y %Q %T %r"})
 	stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
 	cmd.Stdout, cmd.Stderr = stdout, stderr
 	if err := cmd.Run(); err != nil {
@@ -128,9 +143,9 @@
 		if line == "" {
 			continue
 		}
-		var uuid string
+		var uuid, state, reason string
 		var n, p int64
-		if _, err := fmt.Sscan(line, &uuid, &n, &p); err != nil {
+		if _, err := fmt.Sscan(line, &uuid, &n, &p, &state, &reason); err != nil {
 			log.Printf("warning: ignoring unparsed line in squeue output: %q", line)
 			continue
 		}
@@ -141,6 +156,30 @@
 		replacing.priority = p
 		replacing.nice = n
 		newq[uuid] = replacing
+
+		if state == "PENDING" && ((reason == "BadConstraints" && p == 0) || reason == "launch failed requeued held") && replacing.wantPriority > 0 {
+			// When using SLURM 14.x or 15.x, our queued
+			// jobs land in this state when "scontrol
+			// reconfigure" invalidates their feature
+			// constraints by clearing all node features.
+			// They stay in this state even after the
+			// features reappear, until we run "scontrol
+			// release {jobid}".
+			//
+			// "scontrol release" is silent and successful
+			// regardless of whether the features have
+			// reappeared, so rather than second-guessing
+			// whether SLURM is ready, we just keep trying
+			// this until it works.
+			//
+			// "launch failed requeued held" seems to be
+			// another manifestation of this problem,
+			// resolved the same way.
+			log.Printf("releasing held job %q", uuid)
+			sqc.Slurm.Release(uuid)
+		} else if p < 1<<20 && replacing.wantPriority > 0 {
+			log.Printf("warning: job %q has low priority %d, nice %d, state %q, reason %q", uuid, p, n, state, reason)
+		}
 	}
 	sqc.queue = newq
 	sqc.Broadcast()
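
The tie-break added to the sort in reniceAll() makes the ordering total: jobs with
equal wantPriority come back in the same relative order on every polling interval,
so the dispatcher stops issuing renice calls that only shuffle equal-priority jobs.
A minimal, self-contained illustration of the same pattern (the job struct below is
a stand-in with just the two fields the comparison uses, not the dispatcher's actual
slurmJob type):

    package main

    import (
        "fmt"
        "sort"
    )

    // job is a reduced stand-in for the dispatcher's slurmJob type.
    type job struct {
        uuid         string
        wantPriority int64
    }

    func main() {
        jobs := []job{
            {uuid: "zzzzz-dz642-ccccccccccccccc", wantPriority: 500},
            {uuid: "zzzzz-dz642-aaaaaaaaaaaaaaa", wantPriority: 1000},
            {uuid: "zzzzz-dz642-bbbbbbbbbbbbbbb", wantPriority: 500},
        }
        sort.Slice(jobs, func(i, j int) bool {
            if jobs[i].wantPriority != jobs[j].wantPriority {
                return jobs[i].wantPriority > jobs[j].wantPriority
            }
            // Tie-break on uuid: without this, equal-priority jobs
            // could come back in a different order each interval,
            // triggering pointless slurm queue rearrangements.
            return jobs[i].uuid > jobs[j].uuid
        })
        for _, j := range jobs {
            fmt.Println(j.wantPriority, j.uuid)
        }
        // Prints the 1000 job first, then the two 500 jobs in a
        // fixed (descending-uuid) order on every run.
    }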
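
The new squeue format string "%j %y %Q %T %r" emits job name, nice value, priority,
job state, and the scheduler's reason string, and --noheader suppresses the column
header so every output line is parseable. One wrinkle worth noting: fmt.Sscan reads
space-separated tokens, so a single-word reason such as "BadConstraints" parses
fine, but a multi-word reason such as "launch failed requeued held" arrives in the
reason variable as just "launch". A sketch of a parser that keeps the whole reason
intact follows; squeueEntry and parseSqueueLine are hypothetical names for
illustration, not the dispatcher's actual code:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // squeueEntry holds one parsed line of squeue output.
    type squeueEntry struct {
        uuid     string
        nice     int64
        priority int64
        state    string
        reason   string
    }

    // parseSqueueLine parses one line of
    // squeue --all --noheader --format="%j %y %Q %T %r" output.
    // SplitN keeps everything after the fourth space in a single
    // field, so a multi-word reason survives intact.
    func parseSqueueLine(line string) (squeueEntry, error) {
        var e squeueEntry
        fields := strings.SplitN(line, " ", 5)
        if len(fields) < 5 {
            return e, fmt.Errorf("unparsed squeue line: %q", line)
        }
        nice, err := strconv.ParseInt(fields[1], 10, 64)
        if err != nil {
            return e, err
        }
        priority, err := strconv.ParseInt(fields[2], 10, 64)
        if err != nil {
            return e, err
        }
        return squeueEntry{
            uuid:     fields[0],
            nice:     nice,
            priority: priority,
            state:    fields[3],
            reason:   fields[4],
        }, nil
    }

    func main() {
        e, err := parseSqueueLine("zzzzz-dz642-xxxxxxxxxxxxxxx 0 0 PENDING launch failed requeued held")
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Printf("state=%s reason=%q\n", e.state, e.reason)
        // state=PENDING reason="launch failed requeued held"
    }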
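
Finally, the sqc.Slurm.Release(uuid) call corresponds to the "scontrol release
{jobid}" command described in the in-line comment. A rough sketch of what such a
wrapper can look like when shelling out (releaseHeldJob is a hypothetical helper,
not the dispatcher's actual slurmCLI implementation, and it takes a numeric SLURM
job ID as in the comment, whereas the dispatcher addresses jobs by the container
UUID it uses as the job name):

    package main

    import (
        "fmt"
        "os/exec"
    )

    // releaseHeldJob runs "scontrol release <jobid>". Per the comment
    // in the diff, the command succeeds whether or not the job's
    // feature constraints have become satisfiable again, so it is
    // safe to repeat on every polling interval until squeue stops
    // reporting the job as held.
    func releaseHeldJob(jobID string) error {
        out, err := exec.Command("scontrol", "release", jobID).CombinedOutput()
        if err != nil {
            return fmt.Errorf("scontrol release %s: %v (output: %q)", jobID, err, out)
        }
        return nil
    }

    func main() {
        if err := releaseHeldJob("12345"); err != nil {
            fmt.Println(err)
        }
    }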