13493: Merge branch 'master' into 13493-federation-proxy
[arvados.git] services/crunch-dispatch-slurm/squeue.go
index 9514da822b7e748a7498fab0aafb2104371dae65..fd4851eb0a8a92b48fcacef0e4552ce99d0a7f48 100644
@@ -14,11 +14,14 @@ import (
        "time"
 )
 
+const slurm15NiceLimit int64 = 10000
+
 type slurmJob struct {
        uuid         string
        wantPriority int64
        priority     int64 // current slurm priority (incorporates nice value)
        nice         int64 // current slurm nice value
+       hitNiceLimit bool
 }
 
 // Squeue implements asynchronous polling monitor of the SLURM queue using the
@@ -103,10 +106,18 @@ func (sqc *SqueueChecker) reniceAll() {
        })
        renice := wantNice(jobs, sqc.PrioritySpread)
        for i, job := range jobs {
-               if renice[i] == job.nice {
+               niceNew := renice[i]
+               if job.hitNiceLimit && niceNew > slurm15NiceLimit {
+                       niceNew = slurm15NiceLimit
+               }
+               if niceNew == job.nice {
                        continue
                }
-               sqc.Slurm.Renice(job.uuid, renice[i])
+               err := sqc.Slurm.Renice(job.uuid, niceNew)
+               if err != nil && niceNew > slurm15NiceLimit && strings.Contains(err.Error(), "Invalid nice value") {
+                       log.Printf("container %q clamping nice values at %d, priority order will not be correct -- see https://dev.arvados.org/projects/arvados/wiki/SLURM_integration#Limited-nice-values-SLURM-15", job.uuid, slurm15NiceLimit)
+                       job.hitNiceLimit = true
+               }
        }
 }
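
For illustration only (not part of the commit): a minimal runnable sketch of the clamping behavior added in the hunk above, assuming SLURM 15.x rejects nice values above 10000 with an "Invalid nice value" error. slurm15NiceLimit and the hitNiceLimit flag mirror the diff; the job type, fakeRenice, and the hard-coded nice values are hypothetical stand-ins for slurmJob, sqc.Slurm.Renice, and the output of wantNice.

package main

import (
	"errors"
	"fmt"
	"strings"
)

const slurm15NiceLimit int64 = 10000

// job is a hypothetical stand-in for slurmJob, keeping only the fields the
// renice logic touches.
type job struct {
	uuid         string
	nice         int64
	hitNiceLimit bool
}

// fakeRenice imitates SLURM 15.x, which rejects nice values above 10000.
func fakeRenice(uuid string, nice int64) error {
	if nice > slurm15NiceLimit {
		return errors.New("scontrol: error: Invalid nice value, must be between -10000 and 10000")
	}
	return nil
}

// reniceOne mirrors the per-job body of reniceAll in the diff. Unlike the real
// dispatcher (which learns the applied nice value from the next squeue poll),
// this sketch updates j.nice directly so the example is self-contained.
func reniceOne(j *job, want int64) {
	niceNew := want
	if j.hitNiceLimit && niceNew > slurm15NiceLimit {
		niceNew = slurm15NiceLimit
	}
	if niceNew == j.nice {
		return
	}
	err := fakeRenice(j.uuid, niceNew)
	if err != nil && niceNew > slurm15NiceLimit && strings.Contains(err.Error(), "Invalid nice value") {
		fmt.Printf("container %q clamping nice values at %d\n", j.uuid, slurm15NiceLimit)
		j.hitNiceLimit = true
		return
	}
	j.nice = niceNew
}

func main() {
	j := &job{uuid: "zzzzz-dz642-queuedcontainer"}
	reniceOne(j, 12000) // rejected by SLURM 15; hitNiceLimit is set
	reniceOne(j, 12000) // clamped to 10000 and accepted
	fmt.Printf("final nice=%d hitNiceLimit=%v\n", j.nice, j.hitNiceLimit)
}

The first call hits the limit and records it; from then on, requests above 10000 are quietly clamped rather than retried, which is why the diff warns that priority order may no longer be exact on SLURM 15.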
 
@@ -157,21 +168,28 @@ func (sqc *SqueueChecker) check() {
                replacing.nice = n
                newq[uuid] = replacing
 
-               if state == "PENDING" && reason == "BadConstraints" && p == 0 && replacing.wantPriority > 0 {
+               if state == "PENDING" && ((reason == "BadConstraints" && p <= 2*slurm15NiceLimit) || reason == "launch failed requeued held") && replacing.wantPriority > 0 {
                        // When using SLURM 14.x or 15.x, our queued
                        // jobs land in this state when "scontrol
                        // reconfigure" invalidates their feature
                        // constraints by clearing all node features.
                        // They stay in this state even after the
                        // features reappear, until we run "scontrol
-                       // release {jobid}".
+                       // release {jobid}". Priority is usually 0 in
+                       // this state, but sometimes (due to a race
+                       // with nice adjustments?) it's a small
+                       // positive value.
                        //
                        // "scontrol release" is silent and successful
                        // regardless of whether the features have
                        // reappeared, so rather than second-guessing
                        // whether SLURM is ready, we just keep trying
                        // this until it works.
-                       log.Printf("releasing held job %q", uuid)
+                       //
+                       // "launch failed requeued held" seems to be
+                       // another manifestation of this problem,
+                       // resolved the same way.
+                       log.Printf("releasing held job %q (priority=%d, state=%q, reason=%q)", uuid, p, state, reason)
                        sqc.Slurm.Release(uuid)
                } else if p < 1<<20 && replacing.wantPriority > 0 {
                        log.Printf("warning: job %q has low priority %d, nice %d, state %q, reason %q", uuid, p, n, state, reason)
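
For illustration only (not part of the commit): a minimal sketch of the release decision in the hunk above, pulled out as a standalone predicate. shouldRelease is a hypothetical helper; in the diff the same condition appears inline in check() just before calling sqc.Slurm.Release(uuid).

package main

import "fmt"

const slurm15NiceLimit int64 = 10000

// shouldRelease is a hypothetical standalone version of the condition check()
// uses to decide whether to run "scontrol release" on a job that SLURM
// 14.x/15.x has left stuck after "scontrol reconfigure".
func shouldRelease(state, reason string, priority, wantPriority int64) bool {
	if wantPriority <= 0 || state != "PENDING" {
		return false
	}
	// Priority is usually 0 in the stuck state, but a race with nice
	// adjustments can leave a small positive value, hence the 2*limit slack.
	if reason == "BadConstraints" && priority <= 2*slurm15NiceLimit {
		return true
	}
	// "launch failed requeued held" is another manifestation of the same
	// problem, resolved the same way.
	return reason == "launch failed requeued held"
}

func main() {
	fmt.Println(shouldRelease("PENDING", "BadConstraints", 0, 1))                   // true
	fmt.Println(shouldRelease("PENDING", "BadConstraints", 15000, 1))               // true: small positive priority
	fmt.Println(shouldRelease("PENDING", "launch failed requeued held", 123456, 1)) // true
	fmt.Println(shouldRelease("PENDING", "BadConstraints", 1<<20, 1))               // false: priority too high
}

Because "scontrol release" succeeds silently whether or not the node features have reappeared, the dispatcher can simply keep releasing on every poll until the job actually leaves this state.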