// Dispatcher service for Crunch that submits containers to the slurm queue.
import (
- "bufio"
"flag"
"fmt"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+ "git.curoverse.com/arvados.git/sdk/go/config"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
+ "github.com/coreos/go-systemd/daemon"
+ "io"
"io/ioutil"
"log"
"math"
"os"
"os/exec"
"strings"
- "sync"
"time"
)
-type Squeue struct {
- sync.Mutex
- squeueContents []string
- SqueueDone chan struct{}
+// Config used by crunch-dispatch-slurm
+type Config struct {
+ Client arvados.Client
+
+ SbatchArguments []string
+ PollPeriod arvados.Duration
+
+ // crunch-run command to invoke. The container UUID will be
+ // appended. If nil, []string{"crunch-run"} will be used.
+ //
+ // Example: []string{"crunch-run", "--cgroup-parent-subsystem=memory"}
+ CrunchRunCommand []string
}
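+
+// A minimal example configuration file (YAML; JSON is also accepted,
+// per the -config flag below), matching the fields above. The host,
+// token, and partition values are hypothetical placeholders:
+//
+//   Client:
+//     APIHost: zzzzz.arvadosapi.com
+//     AuthToken: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+//     Insecure: false
+//   SbatchArguments:
+//     - "--partition=crunch"
+//   PollPeriod: 10s
+//   CrunchRunCommand:
+//     - crunch-run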
func main() {
err := doMain()
if err != nil {
- log.Fatalf("%q", err)
+ log.Fatal(err)
}
}
var (
- crunchRunCommand *string
- squeueUpdater Squeue
+ theConfig Config
+ squeueUpdater Squeue
)
+const defaultConfigPath = "/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml"
+
func doMain() error {
flags := flag.NewFlagSet("crunch-dispatch-slurm", flag.ExitOnError)
+ flags.Usage = func() { usage(flags) }
- pollInterval := flags.Int(
- "poll-interval",
- 10,
- "Interval in seconds to poll for queued containers")
-
- crunchRunCommand = flags.String(
- "crunch-run-command",
- "/usr/bin/crunch-run",
- "Crunch command to run container")
+ configPath := flags.String(
+ "config",
+ defaultConfigPath,
+ "`path` to JSON or YAML configuration file")
// Parse args; omit the first arg which is the command name
flags.Parse(os.Args[1:])
+ err := readConfig(&theConfig, *configPath)
+ if err != nil {
+ return err
+ }
+
+ if theConfig.CrunchRunCommand == nil {
+ theConfig.CrunchRunCommand = []string{"crunch-run"}
+ }
+
+ if theConfig.PollPeriod == 0 {
+ theConfig.PollPeriod = arvados.Duration(10 * time.Second)
+ }
+
+ if theConfig.Client.APIHost != "" || theConfig.Client.AuthToken != "" {
+ // Copy real configs into env vars so [a]
+ // MakeArvadosClient() uses them, and [b] they get
+ // propagated to crunch-run via SLURM.
+ os.Setenv("ARVADOS_API_HOST", theConfig.Client.APIHost)
+ os.Setenv("ARVADOS_API_TOKEN", theConfig.Client.AuthToken)
+ os.Setenv("ARVADOS_API_HOST_INSECURE", "")
+ if theConfig.Client.Insecure {
+ os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
+ }
+ os.Setenv("ARVADOS_KEEP_SERVICES", strings.Join(theConfig.Client.KeepServiceURIs, " "))
+ os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
+ } else {
+ log.Printf("warning: Client credentials missing from config, so falling back on environment variables (deprecated).")
+ }
+
arv, err := arvadosclient.MakeArvadosClient()
if err != nil {
log.Printf("Error making Arvados client: %v", err)
}
arv.Retries = 25
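+	// StartMonitor and Done are provided by the Squeue type (now
+	// defined elsewhere in this package): StartMonitor begins
+	// polling squeue at the given interval, and Done stops it.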
+ squeueUpdater.StartMonitor(time.Duration(theConfig.PollPeriod))
+ defer squeueUpdater.Done()
+
dispatcher := dispatch.Dispatcher{
Arv: arv,
RunContainer: run,
- PollInterval: time.Duration(*pollInterval) * time.Second,
+ PollInterval: time.Duration(theConfig.PollPeriod),
DoneProcessing: make(chan struct{})}
- squeueUpdater.SqueueDone = make(chan struct{})
- go squeueUpdater.SyncSqueue(time.Duration(*pollInterval) * time.Second)
+ if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
+ log.Printf("Error notifying init daemon: %v", err)
+ }
err = dispatcher.RunDispatcher()
if err != nil {
return err
}
- squeueUpdater.SqueueDone <- struct{}{}
- close(squeueUpdater.SqueueDone)
-
return nil
}
// sbatchCmd
-func sbatchFunc(container dispatch.Container) *exec.Cmd {
- memPerCPU := math.Ceil((float64(container.RuntimeConstraints["ram"])) / (float64(container.RuntimeConstraints["vcpus"] * 1048576)))
- return exec.Command("sbatch", "--share", "--parsable",
- fmt.Sprintf("--job-name=%s", container.UUID),
- fmt.Sprintf("--mem-per-cpu=%d", int(memPerCPU)),
- fmt.Sprintf("--cpus-per-task=%d", int(container.RuntimeConstraints["vcpus"])),
- fmt.Sprintf("--priority=%d", container.Priority))
+func sbatchFunc(container arvados.Container) *exec.Cmd {
+ memPerCPU := math.Ceil(float64(container.RuntimeConstraints.RAM) / (float64(container.RuntimeConstraints.VCPUs) * 1048576))
+
+ var sbatchArgs []string
+ sbatchArgs = append(sbatchArgs, "--share")
+ sbatchArgs = append(sbatchArgs, theConfig.SbatchArguments...)
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--job-name=%s", container.UUID))
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--mem-per-cpu=%d", int(memPerCPU)))
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs))
+ if container.SchedulingParameters.Partitions != nil {
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--partition=%s", strings.Join(container.SchedulingParameters.Partitions, ",")))
+ }
+
+ return exec.Command("sbatch", sbatchArgs...)
}
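+
+// As an illustration (hypothetical UUID), a container with
+// RAM=8589934592 (8 GiB), VCPUs=4, Partitions=["crunch"], and no
+// configured SbatchArguments yields:
+//
+//   sbatch --share --job-name=zzzzz-dz642-xxxxxxxxxxxxxxx \
+//          --mem-per-cpu=2048 --cpus-per-task=4 --partition=crunch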
-// squeueFunc
-func squeueFunc() *exec.Cmd {
- return exec.Command("squeue", "--format=%j")
+// scancelCmd
+func scancelFunc(container arvados.Container) *exec.Cmd {
+ return exec.Command("scancel", "--name="+container.UUID)
}
// Wrap these so that they can be overridden by tests
var sbatchCmd = sbatchFunc
-var squeueCmd = squeueFunc
+var scancelCmd = scancelFunc
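+
+// For example, a test could stub scancel with a no-op command
+// (hypothetical):
+//
+//   scancelCmd = func(container arvados.Container) *exec.Cmd {
+//       return exec.Command("true")
+//   }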
// Submit job to slurm using sbatch.
func submit(dispatcher *dispatch.Dispatcher,
- container dispatch.Container, crunchRunCommand string) (jobid string, submitErr error) {
- submitErr = nil
-
+ container arvados.Container, crunchRunCommand []string) (submitErr error) {
	defer func() {
		// If we didn't get as far as submitting a slurm job,
		// unlock the container and return it to the queue.
		if submitErr == nil {
			// OK, no cleanup needed
			return
		}
-		err := dispatcher.Arv.Update("containers", container.UUID,
-			arvadosclient.Dict{
-				"container": arvadosclient.Dict{"state": "Queued"}},
-			nil)
+		err := dispatcher.Unlock(container.UUID)
		if err != nil {
			log.Printf("Error unlocking container %s: %v", container.UUID, err)
		}
	}()
+ // Mutex between squeue sync and running sbatch or scancel.
+ squeueUpdater.SlurmLock.Lock()
+ defer squeueUpdater.SlurmLock.Unlock()
+
+ log.Printf("sbatch starting: %+q", cmd.Args)
err := cmd.Start()
if err != nil {
- submitErr = fmt.Errorf("Error starting %v: %v", cmd.Args, err)
+ submitErr = fmt.Errorf("Error starting sbatch: %v", err)
return
}
// Send a tiny script on stdin to execute the crunch-run command
// slurm actually enforces that this must be a #! script
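+	// execScript (defined elsewhere in this package) wraps the
+	// argument list in a minimal "#!" script; for a hypothetical
+	// UUID the generated payload resembles:
+	//
+	//   #!/bin/sh
+	//   exec 'crunch-run' 'zzzzz-dz642-xxxxxxxxxxxxxxx'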
- fmt.Fprintf(stdinWriter, "#!/bin/sh\nexec '%s' '%s'\n", crunchRunCommand, container.UUID)
+ io.WriteString(stdinWriter, execScript(append(crunchRunCommand, container.UUID)))
stdinWriter.Close()
err = cmd.Wait()
close(stderrChan)
if err != nil {
- submitErr = fmt.Errorf("Container submission failed %v: %v %v", cmd.Args, err, stderrmsg)
+ submitErr = fmt.Errorf("Container submission failed: %v: %v (stderr: %q)", cmd.Args, err, stderrmsg)
return
}
- // If everything worked out, got the jobid on stdout
- jobid = strings.TrimSpace(string(stdoutMsg))
-
+ log.Printf("sbatch succeeded: %s", strings.TrimSpace(string(stdoutMsg)))
return
}
-func (squeue *Squeue) runSqueue() ([]string, error) {
- var newSqueueContents []string
-
- cmd := squeueCmd()
- sq, err := cmd.StdoutPipe()
- if err != nil {
- return nil, err
- }
- cmd.Start()
- scanner := bufio.NewScanner(sq)
- for scanner.Scan() {
- newSqueueContents = append(newSqueueContents, scanner.Text())
- }
- if err := scanner.Err(); err != nil {
- cmd.Wait()
- return nil, err
- }
-
- err = cmd.Wait()
- if err != nil {
- return nil, err
- }
-
- return newSqueueContents, nil
-}
-
-func (squeue *Squeue) CheckSqueue(uuid string, check bool) (bool, error) {
- if check {
- n, err := squeue.runSqueue()
- if err != nil {
- return false, err
- }
- squeue.Lock()
- squeue.squeueContents = n
- squeue.Unlock()
- }
-
- if uuid != "" {
- squeue.Lock()
- defer squeue.Unlock()
- for _, k := range squeue.squeueContents {
- if k == uuid {
- return true, nil
- }
- }
- }
- return false, nil
-}
-
-func (squeue *Squeue) SyncSqueue(pollInterval time.Duration) {
- // TODO: considering using "squeue -i" instead of polling squeue.
- ticker := time.NewTicker(pollInterval)
- for {
- select {
- case <-squeueUpdater.SqueueDone:
- return
- case <-ticker.C:
- squeue.CheckSqueue("", true)
- }
- }
-}
-
-// Run or monitor a container.
-//
// If the container is marked as Locked, check if it is already in the slurm
// queue. If not, submit it.
//
// If the container is marked as Running, check if it is in the slurm queue.
// If not, mark it as Cancelled.
-//
-// Monitor status updates. If the priority changes to zero, cancel the
-// container using scancel.
-func run(dispatcher *dispatch.Dispatcher,
- container dispatch.Container,
- status chan dispatch.Container) {
-
- uuid := container.UUID
+func monitorSubmitOrCancel(dispatcher *dispatch.Dispatcher, container arvados.Container, monitorDone *bool) {
+ submitted := false
+ for !*monitorDone {
+ if squeueUpdater.CheckSqueue(container.UUID) {
+ // Found in the queue, so continue monitoring
+ submitted = true
+ } else if container.State == dispatch.Locked && !submitted {
+ // Not in queue but in Locked state and we haven't
+ // submitted it yet, so submit it.
- if container.State == dispatch.Locked {
- if inQ, err := squeueUpdater.CheckSqueue(container.UUID, true); err != nil {
- // maybe squeue is broken, put it back in the queue
- log.Printf("Error running squeue: %v", err)
- dispatcher.UpdateState(container.UUID, dispatch.Queued)
- } else if !inQ {
log.Printf("About to submit queued container %v", container.UUID)
- if _, err := submit(dispatcher, container, *crunchRunCommand); err != nil {
+ if err := submit(dispatcher, container, theConfig.CrunchRunCommand); err != nil {
log.Printf("Error submitting container %s to slurm: %v",
container.UUID, err)
// maybe sbatch is broken, put it back to queued
- dispatcher.UpdateState(container.UUID, dispatch.Queued)
+ dispatcher.Unlock(container.UUID)
+ }
+ submitted = true
+ } else {
+ // Not in queue and we are not going to submit it.
+			// Refresh the container state. If it is
+			// Complete/Cancelled, do nothing; if it is Locked,
+			// release it back to the queue; if it is Running,
+			// clean up the record.
+
+ var con arvados.Container
+ err := dispatcher.Arv.Get("containers", container.UUID, nil, &con)
+ if err != nil {
+ log.Printf("Error getting final container state: %v", err)
+ }
+
+ switch con.State {
+ case dispatch.Locked:
+ log.Printf("Container %s in state %v but missing from slurm queue, changing to %v.",
+ container.UUID, con.State, dispatch.Queued)
+ dispatcher.Unlock(container.UUID)
+ case dispatch.Running:
+ st := dispatch.Cancelled
+ log.Printf("Container %s in state %v but missing from slurm queue, changing to %v.",
+ container.UUID, con.State, st)
+ dispatcher.UpdateState(container.UUID, st)
+ default:
+ // Container state is Queued, Complete or Cancelled so stop monitoring it.
+ return
}
}
}
+}
- log.Printf("Monitoring container %v started", uuid)
-
- // periodically check squeue
- doneSqueue := make(chan struct{})
- go func() {
- squeueUpdater.CheckSqueue(container.UUID, true)
- ticker := time.NewTicker(dispatcher.PollInterval)
- for {
- select {
- case <-ticker.C:
- if inQ, err := squeueUpdater.CheckSqueue(container.UUID, false); err != nil {
- log.Printf("Error running squeue: %v", err)
- // don't cancel, just leave it the way it is
- } else if !inQ {
- var con dispatch.Container
- err := dispatcher.Arv.Get("containers", uuid, nil, &con)
- if err != nil {
- log.Printf("Error getting final container state: %v", err)
- }
+// Run or monitor a container.
+//
+// Monitor status updates. If the priority changes to zero, cancel the
+// container using scancel.
+func run(dispatcher *dispatch.Dispatcher,
+ container arvados.Container,
+ status chan arvados.Container) {
- var st string
- switch con.State {
- case dispatch.Locked:
- st = dispatch.Queued
- case dispatch.Running:
- st = dispatch.Cancelled
- default:
- st = ""
- }
+ log.Printf("Monitoring container %v started", container.UUID)
+ defer log.Printf("Monitoring container %v finished", container.UUID)
- if st != "" {
- log.Printf("Container %s in state %v but missing from slurm queue, changing to %v.",
- uuid, con.State, st)
- dispatcher.UpdateState(uuid, st)
- }
- }
- case <-doneSqueue:
- close(doneSqueue)
- ticker.Stop()
- return
- }
- }
- }()
+ monitorDone := false
+ go monitorSubmitOrCancel(dispatcher, container, &monitorDone)
for container = range status {
if container.State == dispatch.Locked || container.State == dispatch.Running {
if container.Priority == 0 {
log.Printf("Canceling container %s", container.UUID)
- err := exec.Command("scancel", "--name="+container.UUID).Run()
+ // Mutex between squeue sync and running sbatch or scancel.
+ squeueUpdater.SlurmLock.Lock()
+ err := scancelCmd(container).Run()
+ squeueUpdater.SlurmLock.Unlock()
+
if err != nil {
log.Printf("Error stopping container %s with scancel: %v",
container.UUID, err)
- if inQ, err := squeueUpdater.CheckSqueue(container.UUID, true); err != nil {
- log.Printf("Error running squeue: %v", err)
- continue
- } else if inQ {
+ if squeueUpdater.CheckSqueue(container.UUID) {
log.Printf("Container %s is still in squeue after scancel.",
container.UUID)
continue
}
}
}
		}
	}
-	doneSqueue <- struct{}{}
-
-	log.Printf("Monitoring container %v finished", uuid)
+	monitorDone = true
}
+func readConfig(dst interface{}, path string) error {
+ err := config.LoadFile(dst, path)
+ if err != nil && os.IsNotExist(err) && path == defaultConfigPath {
+		log.Printf("Config not specified. Continuing with default configuration.")
+ err = nil
+ }
+ return err
}